pax_global_header00006660000000000000000000000064135266174460014530gustar00rootroot0000000000000052 comment=840cb6ff177ae933d15a57b49a9b87c3926a2240 redis-py-cluster-2.0.0/000077500000000000000000000000001352661744600147425ustar00rootroot00000000000000redis-py-cluster-2.0.0/.gitignore000066400000000000000000000002331352661744600167300ustar00rootroot00000000000000# Ignore all python compiled files *.pyc *.swp env27* .tox .coverage* dump.rdb redis-git htmlcov dist build *.egg-info .cache docs/_build docs/_build_html redis-py-cluster-2.0.0/.travis.yml000066400000000000000000000034511352661744600170560ustar00rootroot00000000000000sudo: false dist: xenial language: python cache: pip python: - "2.7" - "3.4" - "3.5" - "3.6" - "nightly" services: - redis-server install: - "if [[ $REDIS_VERSION == '3.0' ]]; then REDIS_VERSION=3.0 make redis-install; fi" - "if [[ $REDIS_VERSION == '3.2' ]]; then REDIS_VERSION=3.2 make redis-install; fi" - "if [[ $REDIS_VERSION == '4.0' ]]; then REDIS_VERSION=4.0 make redis-install; fi" - "if [[ $REDIS_VERSION == '5.0' ]]; then REDIS_VERSION=5.0 make redis-install; fi" - "if [[ $TEST_PYCODESTYLE == '1' ]]; then pip install pycodestyle; fi" - pip install -r dev-requirements.txt - pip install -e . - "if [[ $HIREDIS == '1' ]]; then pip install hiredis; fi" - "pip freeze | grep redis" - "pip freeze" env: # Redis 3.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=3.0 - HIREDIS=1 REDIS_VERSION=3.0 # Redis 3.2 & HIREDIS - HIREDIS=0 REDIS_VERSION=3.2 - HIREDIS=1 REDIS_VERSION=3.2 # Redis 4.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=4.0 - HIREDIS=1 REDIS_VERSION=4.0 # Redis 5.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=5.0 - HIREDIS=1 REDIS_VERSION=5.0 script: - make start - coverage erase - coverage run --source rediscluster -p -m py.test - py.test - make stop after_success: - coverage combine - coveralls - "if [[ $TEST_PYCODESTYLE == '1' ]]; then pycodestyle --repeat --show-source --exclude=.venv,.tox,dist,docs,build,*.egg,redis_install .; fi" matrix: allow_failures: - python: "nightly" - python: 2.7 env: TEST_PYCODESTYLE=1 - python: 3.6 env: TEST_PYCODESTYLE=1 # python 3.7 has to be specified manually in the matrix # https://github.com/travis-ci/travis-ci/issues/9815 - python: 3.7 dist: xenial sudo: true env: TEST_HIREDIS=0 - python: 3.7 dist: xenial sudo: true env: TEST_HIREDIS=1 redis-py-cluster-2.0.0/CONTRIBUTING.md000066400000000000000000000052771352661744600172060ustar00rootroot00000000000000 # Pull Request For bug fixes you should provide some information about how to reproduce the problem so it can be verified if the new code solves the bug. All CI tests must pass (Travis-CI) Follow the code quality standards described in this file. You are responsible for ensuring the code is mergable and fix any issues that can occur if other code was merged before your code. Allways ensure docs is up to date based on your changes. If docs is missing and you think it should exists you are responsible to write it. For all PR you should do/include the following - A line about the change in the `CHANGES` file Add it in the section `Next release`, create it if needed. - If you change something already implemented, for example add/remove argument you should add a line in `docs/Upgrading.md` describing how to migrate existing code from the old to the new code. Add it in the section `Next release`, create it if needed. 
- Add yourself to `docs/Authors` file (This is optional if you want) # Code standard In general, you should follow the established pep8 coding standard, but with the following exceptions/changes. https://www.python.org/dev/peps/pep-0008/ - The default max line length (80) should not be followed religiously. Instead try to not exceed ~140 characters. Use the `flake8` tool to ensure you have good code quality. - Try to document as much as possible in the method docstring and avoid doc inside the code. Code should describe itself as much as possible. - Follow the `KISS` rule and `Make it work first, optimize later` - When indenting, try to indent with json style. For example: ``` # Do not use this style from foo import (bar, qwe, rty, foobar, barfoo) print("foobar {barfoo} {qwert}".format(barfoo=foo, qwerty=bar)) ``` ``` # Use this style instead from foo import ( bar, qwe, rty, foobar, barfoo, ) print("foobar {barfoo} {qwert}".format( barfoo=foo, qwerty=bar)) ``` # Tests I (Johan/Grokzen) have been allowed (by andymccurdy) explicitly to use all test code that already exists inside `redis-py` lib. If possible you should reuse code that exists in there. All code should aim to have 100% test coverage. This is just a target and not a requirements. All new features must implement tests to show that it works as intended. All implemented tests must pass on all supported python versions. List of supported versions can be found in the `README.md`. All tests should be assumed to work against the test environment that is implemented when running in `travis-ci`. Currently that means 6 nodes in the cluster, 3 masters, 3 slaves, using port `7000-7005` and the node on port `7000` must be accessible on `127.0.0.1` redis-py-cluster-2.0.0/LICENSE000066400000000000000000000020731352661744600157510ustar00rootroot00000000000000Copyright (c) 2014-2019 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
redis-py-cluster-2.0.0/MANIFEST.in000066400000000000000000000002211352661744600164730ustar00rootroot00000000000000exclude *.py include docs/authors.rst include docs/License.txt include docs/release-notes.rst include CHANGES include setup.py include README.md redis-py-cluster-2.0.0/Makefile000066400000000000000000000275341352661744600164150ustar00rootroot00000000000000PATH := ./redis-git/src:${PATH} # CLUSTER REDIS NODES define REDIS_CLUSTER_NODE1_CONF daemonize yes port 7000 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_node1.pid logfile /tmp/redis_cluster_node1.log save "" appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_node1.conf endef define REDIS_CLUSTER_NODE2_CONF daemonize yes port 7001 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_node2.pid logfile /tmp/redis_cluster_node2.log save "" appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_node2.conf endef define REDIS_CLUSTER_NODE3_CONF daemonize yes port 7002 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_node3.pid logfile /tmp/redis_cluster_node3.log save "" appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_node3.conf endef define REDIS_CLUSTER_NODE4_CONF daemonize yes port 7003 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_node4.pid logfile /tmp/redis_cluster_node4.log save "" appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_node4.conf endef define REDIS_CLUSTER_NODE5_CONF daemonize yes port 7004 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_node5.pid logfile /tmp/redis_cluster_node5.log save "" appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_node5.conf endef define REDIS_CLUSTER_NODE6_CONF daemonize yes port 7005 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_node6.pid logfile /tmp/redis_cluster_node6.log save "" appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_node6.conf endef define REDIS_CLUSTER_NODE7_CONF daemonize yes port 7006 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_node7.pid logfile /tmp/redis_cluster_node7.log save "" appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_node7.conf endef define REDIS_CLUSTER_NODE8_CONF daemonize yes port 7007 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_node8.pid logfile /tmp/redis_cluster_node8.log save "" appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_node8.conf endef # CLUSTER REDIS PASSWORD PROTECTED NODES define REDIS_CLUSTER_PASSWORD_PROTECTED_NODE1_CONF daemonize yes port 7100 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_password_protected_node1.pid logfile /tmp/redis_cluster_password_protected_node1.log save "" masterauth password_is_protected requirepass password_is_protected appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_password_protected_node1.conf endef define REDIS_CLUSTER_PASSWORD_PROTECTED_NODE2_CONF daemonize yes port 7101 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_password_protected_node2.pid logfile /tmp/redis_cluster_password_protected_node2.log save "" masterauth password_is_protected requirepass password_is_protected appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_password_protected_node2.conf endef define REDIS_CLUSTER_PASSWORD_PROTECTED_NODE3_CONF daemonize yes port 7102 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_password_protected_node3.pid logfile /tmp/redis_cluster_password_protected_node3.log save "" masterauth password_is_protected 
requirepass password_is_protected appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_password_protected_node3.conf endef define REDIS_CLUSTER_PASSWORD_PROTECTED_NODE4_CONF daemonize yes port 7103 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_password_protected_node4.pid logfile /tmp/redis_cluster_password_protected_node4.log save "" masterauth password_is_protected requirepass password_is_protected appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_password_protected_node4.conf endef define REDIS_CLUSTER_PASSWORD_PROTECTED_NODE5_CONF daemonize yes port 7104 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_password_protected_node5.pid logfile /tmp/redis_cluster_password_protected_node5.log save "" masterauth password_is_protected requirepass password_is_protected appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_password_protected_node5.conf endef define REDIS_CLUSTER_PASSWORD_PROTECTED_NODE6_CONF daemonize yes port 7105 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_password_protected_node6.pid logfile /tmp/redis_cluster_password_protected_node6.log save "" masterauth password_is_protected requirepass password_is_protected appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_password_protected_node6.conf endef define REDIS_CLUSTER_PASSWORD_PROTECTED_NODE7_CONF daemonize yes port 7106 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_password_protected_node7.pid logfile /tmp/redis_cluster_password_protected_node7.log save "" masterauth password_is_protected requirepass password_is_protected appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_password_protected_node7.conf endef define REDIS_CLUSTER_PASSWORD_PROTECTED_NODE8_CONF daemonize yes port 7107 cluster-node-timeout 5000 pidfile /tmp/redis_cluster_password_protected_node8.pid logfile /tmp/redis_cluster_password_protected_node8.log save "" masterauth password_is_protected requirepass password_is_protected appendonly no cluster-enabled yes cluster-config-file /tmp/redis_cluster_password_protected_node8.conf endef ifndef REDIS_TRIB_RB REDIS_TRIB_RB=tests/redis-trib.rb endif ifndef REDIS_VERSION REDIS_VERSION=5.0.5 endif export REDIS_CLUSTER_NODE1_CONF export REDIS_CLUSTER_NODE2_CONF export REDIS_CLUSTER_NODE3_CONF export REDIS_CLUSTER_NODE4_CONF export REDIS_CLUSTER_NODE5_CONF export REDIS_CLUSTER_NODE6_CONF export REDIS_CLUSTER_NODE7_CONF export REDIS_CLUSTER_NODE8_CONF export REDIS_CLUSTER_PASSWORD_PROTECTED_NODE1_CONF export REDIS_CLUSTER_PASSWORD_PROTECTED_NODE2_CONF export REDIS_CLUSTER_PASSWORD_PROTECTED_NODE3_CONF export REDIS_CLUSTER_PASSWORD_PROTECTED_NODE4_CONF export REDIS_CLUSTER_PASSWORD_PROTECTED_NODE5_CONF export REDIS_CLUSTER_PASSWORD_PROTECTED_NODE6_CONF export REDIS_CLUSTER_PASSWORD_PROTECTED_NODE7_CONF export REDIS_CLUSTER_PASSWORD_PROTECTED_NODE8_CONF help: @echo "Please use 'make ' where is one of" @echo " clean remove temporary files created by build tools" @echo " cleanmeta removes all META-* and egg-info/ files created by build tools" @echo " cleancov remove all files related to coverage reports" @echo " cleanall all the above + tmp files from development tools" @echo " test run test suite" @echo " sdist make a source distribution" @echo " bdist make an egg distribution" @echo " install install package" @echo " benchmark runs all benchmarks. 
assumes nodes running on port 7001 and 7007" @echo " *** CI Commands ***" @echo " start starts a test redis cluster" @echo " stop stop all started redis nodes (Started via 'make start' only affected)" @echo " cleanup cleanup files after running a test cluster" @echo " test starts/activates the test cluster nodes and runs tox test" @echo " tox run all tox environments and combine coverage report after" @echo " redis-install checkout latest redis commit --> build --> install ruby dependencies" clean: -rm -f MANIFEST -rm -rf dist/ -rm -rf build/ cleancov: -rm -rf htmlcov/ -coverage combine -coverage erase cleanmeta: -rm -rf redis_py_cluster.egg-info/ cleanall: clean cleancov cleanmeta -find . -type f -name "*~" -exec rm -f "{}" \; -find . -type f -name "*.orig" -exec rm -f "{}" \; -find . -type f -name "*.rej" -exec rm -f "{}" \; -find . -type f -name "*.pyc" -exec rm -f "{}" \; -find . -type f -name "*.parse-index" -exec rm -f "{}" \; sdist: cleanmeta python setup.py sdist bdist: cleanmeta python setup.py bdist_egg install: python setup.py install start: cleanup echo "$$REDIS_CLUSTER_NODE1_CONF" | redis-server - echo "$$REDIS_CLUSTER_NODE2_CONF" | redis-server - echo "$$REDIS_CLUSTER_NODE3_CONF" | redis-server - echo "$$REDIS_CLUSTER_NODE4_CONF" | redis-server - echo "$$REDIS_CLUSTER_NODE5_CONF" | redis-server - echo "$$REDIS_CLUSTER_NODE6_CONF" | redis-server - echo "$$REDIS_CLUSTER_NODE7_CONF" | redis-server - echo "$$REDIS_CLUSTER_NODE8_CONF" | redis-server - echo "$$REDIS_CLUSTER_PASSWORD_PROTECTED_NODE1_CONF" | redis-server - echo "$$REDIS_CLUSTER_PASSWORD_PROTECTED_NODE2_CONF" | redis-server - echo "$$REDIS_CLUSTER_PASSWORD_PROTECTED_NODE3_CONF" | redis-server - echo "$$REDIS_CLUSTER_PASSWORD_PROTECTED_NODE4_CONF" | redis-server - echo "$$REDIS_CLUSTER_PASSWORD_PROTECTED_NODE5_CONF" | redis-server - echo "$$REDIS_CLUSTER_PASSWORD_PROTECTED_NODE6_CONF" | redis-server - echo "$$REDIS_CLUSTER_PASSWORD_PROTECTED_NODE7_CONF" | redis-server - echo "$$REDIS_CLUSTER_PASSWORD_PROTECTED_NODE8_CONF" | redis-server - sleep 5 echo "yes" | ruby $(REDIS_TRIB_RB) create --replicas 1 127.0.0.1:7000 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 sleep 5 echo "yes" | ruby $(REDIS_TRIB_RB) create --replicas 1 --password password_is_protected 127.0.0.1:7100 127.0.0.1:7101 127.0.0.1:7102 127.0.0.1:7103 127.0.0.1:7104 127.0.0.1:7105 sleep 5 cleanup: - rm -vf /tmp/redis_cluster_node*.conf 2>/dev/null - rm -vf /tmp/redis_cluster_password_protected_node*.conf 2>/dev/null - rm dump.rdb appendonly.aof - 2>/dev/null stop: kill `cat /tmp/redis_cluster_node1.pid` || true kill `cat /tmp/redis_cluster_node2.pid` || true kill `cat /tmp/redis_cluster_node3.pid` || true kill `cat /tmp/redis_cluster_node4.pid` || true kill `cat /tmp/redis_cluster_node5.pid` || true kill `cat /tmp/redis_cluster_node6.pid` || true kill `cat /tmp/redis_cluster_node7.pid` || true kill `cat /tmp/redis_cluster_node8.pid` || true kill `cat /tmp/redis_cluster_password_protected_node1.pid` || true kill `cat /tmp/redis_cluster_password_protected_node2.pid` || true kill `cat /tmp/redis_cluster_password_protected_node3.pid` || true kill `cat /tmp/redis_cluster_password_protected_node4.pid` || true kill `cat /tmp/redis_cluster_password_protected_node5.pid` || true kill `cat /tmp/redis_cluster_password_protected_node6.pid` || true kill `cat /tmp/redis_cluster_password_protected_node7.pid` || true kill `cat /tmp/redis_cluster_password_protected_node8.pid` || true rm -f /tmp/redis_cluster_node1.conf rm -f 
/tmp/redis_cluster_node2.conf rm -f /tmp/redis_cluster_node3.conf rm -f /tmp/redis_cluster_node4.conf rm -f /tmp/redis_cluster_node5.conf rm -f /tmp/redis_cluster_node6.conf rm -f /tmp/redis_cluster_node7.conf rm -f /tmp/redis_cluster_node8.conf rm -f /tmp/redis_cluster_password_protected_node1.conf rm -f /tmp/redis_cluster_password_protected_node2.conf rm -f /tmp/redis_cluster_password_protected_node3.conf rm -f /tmp/redis_cluster_password_protected_node4.conf rm -f /tmp/redis_cluster_password_protected_node5.conf rm -f /tmp/redis_cluster_password_protected_node6.conf rm -f /tmp/redis_cluster_password_protected_node7.conf rm -f /tmp/redis_cluster_password_protected_node8.conf test: make start make tox make stop tox: coverage erase tox TEST_PASSWORD_PROTECTED=1 tox coverage combine coverage report clone-redis: [ ! -e redis-git ] && git clone https://github.com/antirez/redis.git redis-git || true cd redis-git && git checkout $(REDIS_VERSION) redis-install: make clone-redis make -C redis-git -j4 gem install redis sleep 3 benchmark: @echo "" @echo " -- Running Simple benchmark with Redis lib and non cluster server --" python benchmarks/simple.py --port 7007 --timeit --nocluster @echo "" @echo " -- Running Simple benchmark with RedisCluster lib and cluster server --" python benchmarks/simple.py --port 7001 --timeit @echo "" @echo " -- Running Simple benchmark with pipelines & Redis lib and non cluster server --" python benchmarks/simple.py --port 7007 --timeit --pipeline --nocluster @echo "" @echo " -- Running Simple benchmark with RedisCluster lib and cluster server" python benchmarks/simple.py --port 7001 --timeit --pipeline ptp: python ptp-debug.py .PHONY: test redis-py-cluster-2.0.0/README.md000066400000000000000000000034731352661744600162300ustar00rootroot00000000000000# redis-py-cluster This client provides a client for redis cluster that was added in redis 3.0. This project is a port of `redis-rb-cluster` by antirez, with alot of added functionality. The original source can be found at https://github.com/antirez/redis-rb-cluster [![Build Status](https://travis-ci.org/Grokzen/redis-py-cluster.svg?branch=master)](https://travis-ci.org/Grokzen/redis-py-cluster) [![Coverage Status](https://coveralls.io/repos/Grokzen/redis-py-cluster/badge.png)](https://coveralls.io/r/Grokzen/redis-py-cluster) [![PyPI version](https://badge.fury.io/py/redis-py-cluster.svg)](http://badge.fury.io/py/redis-py-cluster) # Documentation All documentation can be found at https://redis-py-cluster.readthedocs.io/en/master This Readme contains a reduced version of the full documentation. Upgrading instructions between each released version can be found [here](docs/upgrading.rst) Changelog for next release and all older releasess can be found [here](docs/release-notes.rst) ## Installation Latest stable release from pypi ``` $ pip install redis-py-cluster ``` This major version of `redis-py-cluster` supports `redis-py>=3.0.0,<3.1.0`. ## Usage example Small sample script that shows how to get started with RedisCluster. It can also be found in [examples/basic.py](examples/basic.py) ```python >>> from rediscluster import RedisCluster >>> # Requires at least one node for cluster discovery. Multiple nodes is recommended. 
>>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] >>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) >>> rc.set("foo", "bar") True >>> print(rc.get("foo")) 'bar' ``` ## License & Authors Copyright (c) 2013-2019 Johan Andersson MIT (See docs/License.txt file) The license should be the same as redis-py (https://github.com/andymccurdy/redis-py) redis-py-cluster-2.0.0/benchmarks/000077500000000000000000000000001352661744600170575ustar00rootroot00000000000000redis-py-cluster-2.0.0/benchmarks/simple.py000066400000000000000000000067511352661744600207330ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding:utf-8 -*- """ Usage: redis-cluster-benchmark.py [--host ] [-p ] [-n ] [-c ] [--nocluster] [--timeit] [--pipeline] [--resetlastkey] [-h] [--version] Options: --host Redis server to test against [default: 127.0.0.1] -p Port on redis server [default: 7000] -n Request number [default: 100000] -c Concurrent client number [default: 1] --nocluster If flag is set then Redis will be used instead of cluster lib --timeit Run a mini benchmark to test performance --pipeline Only usable with --timeit flag. Runs SET/GET inside pipelines. --resetlastkey Reset __last__ key -h --help Output this help and exit --version Output version and exit """ import time from multiprocessing import Process # 3rd party imports from docopt import docopt def loop(rc, reset_last_key=None): """ Regular debug loop that can be used to test how redis behaves during changes in the cluster. """ if reset_last_key: rc.set("__last__", 0) last = False while last is False: try: last = rc.get("__last__") last = 0 if not last else int(last) print("starting at foo{0}".format(last)) except Exception as e: print("error {0}".format(e)) time.sleep(1) for i in range(last, 1000000000): # noqa try: print("SET foo{0} {1}".format(i, i)) rc.set("foo{0}".format(i), i) got = rc.get("foo{0}".format(i)) print("GET foo{0} {1}".format(i, got)) rc.set("__last__", i) except Exception as e: print("error {0}".format(e)) time.sleep(0.05) def timeit(rc, num): """ Time how long it take to run a number of set/get:s """ for i in range(0, num//2): # noqa s = "foo{0}".format(i) rc.set(s, i) rc.get(s) def timeit_pipeline(rc, num): """ Time how long it takes to run a number of set/get:s inside a cluster pipeline """ for i in range(0, num//2): # noqa s = "foo{0}".format(i) p = rc.pipeline() p.set(s, i) p.get(s) p.execute() if __name__ == "__main__": args = docopt(__doc__, version="0.3.1") startup_nodes = [{"host": args['--host'], "port": args['-p']}] if not args["--nocluster"]: from rediscluster import RedisCluster rc = RedisCluster(startup_nodes=startup_nodes, max_connections=32, socket_timeout=0.1, decode_responses=True) else: from redis import Redis rc = Redis(host=args["--host"], port=args["-p"], socket_timeout=0.1, decode_responses=True) # create specified number processes processes = [] single_request = int(args["-n"]) // int(args["-c"]) for j in range(int(args["-c"])): if args["--timeit"]: if args["--pipeline"]: p = Process(target=timeit_pipeline, args=(rc, single_request)) else: p = Process(target=timeit, args=(rc, single_request)) else: p = Process(target=loop, args=(rc, args["--resetlastkey"])) processes.append(p) t1 = time.time() for p in processes: p.start() for p in processes: p.join() t2 = time.time() - t1 print("Tested {0}k SET & GET (each 50%) operations took: {1} seconds... 
{2} operations per second".format(int(args["-n"]) / 1000, t2, int(args["-n"]) / t2 * 2)) redis-py-cluster-2.0.0/dev-requirements.txt000066400000000000000000000001541352661744600210020ustar00rootroot00000000000000-r requirements.txt coverage pytest testfixtures mock docopt tox python-coveralls ptpdb ptpython pysnooper redis-py-cluster-2.0.0/docs/000077500000000000000000000000001352661744600156725ustar00rootroot00000000000000redis-py-cluster-2.0.0/docs/License.txt000066400000000000000000000020711352661744600200150ustar00rootroot00000000000000Copyright (c) 2014-2019 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.redis-py-cluster-2.0.0/docs/Makefile000066400000000000000000000173231352661744600173400ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/redis-py-cluster.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/redis-py-cluster.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." 
@echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/redis-py-cluster" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/redis-py-cluster" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." redis-py-cluster-2.0.0/docs/authors.rst000066400000000000000000000020541352661744600201120ustar00rootroot00000000000000Project Authors =============== Added in the order they contributed. 
If you are mentioned in this document and want your row changed for any reason, open a new PR with changes. Lead author and maintainer: Grokzen - https://github.com/Grokzen Authors who contributed code or testing: - Dobrite - https://github.com/dobrite - 72squared - https://github.com/72squared - Neuron Teckid - https://github.com/neuront - iandyh - https://github.com/iandyh - mumumu - https://github.com/mumumu - awestendorf - https://github.com/awestendorf - Ali-Akber Saifee - https://github.com/alisaifee - etng - https://github.com/etng - gmolight - https://github.com/gmolight - baranbartu - https://github.com/baranbartu - monklof - https://github.com/monklof - dutradda - https://github.com/dutradda - AngusP - https://github.com/AngusP - Doug Kent - https://github.com/dkent - VascoVisser - https://github.com/VascoVisser - astrohsy - https://github.com/astrohsy - Artur Stawiarski - https://github.com/astawiarski - Matthew Anderson - https://github.com/mc3ander redis-py-cluster-2.0.0/docs/benchmarks.rst000066400000000000000000000043661352661744600205520ustar00rootroot00000000000000Benchmarks ========== These are a few benchmarks that are designed to test specific parts of the code to demonstrate the performance difference between using this lib and the normal Redis client. Setup benchmarks ---------------- Before running any benchmark you should install this lib in editable mode inside a virtualenv so it can import `RedisCluster` lib. Install with .. code-block:: bash pip install -e . You also need a few redis servers to test against. You must have one cluster with at least one node on port `7001` and you must also have a non-clustered server on port `7007`. Implemented benchmarks --------------------- - `simple.py`, This benchmark can be used to measure a simple `set` and `get` operation chain. It also supports running pipelines by adding the flag `--pipeline`. Run predefined benchmarks ------------------------- These are a set of predefined benchmarks that can be run to measure the performance drop from using this library. To run the benchmarks run .. code-block:: bash make benchmark Example output and comparison of different runmodes .. code-block:: -- Running Simple benchmark with Redis lib and non cluster server, 50 concurrent processes and total 50000*2 requests -- python benchmarks/simple.py --host 127.0.0.1 --timeit --nocluster -c 50 -n 50000 50.0k SET/GET operations took: 2.45 seconds... 40799.93 operations per second -- Running Simple benchmark with RedisCluster lib and cluster server, 50 concurrent processes and total 50000*2 requests -- python benchmarks/simple.py --host 127.0.0.1 --timeit -c 50 -n 50000 50.0k SET & GET (each 50%) operations took: 9.51 seconds... 31513.71 operations per second -- Running Simple benchmark with pipelines & Redis lib and non cluster server -- python benchmarks/simple.py --host 127.0.0.1 --timeit --nocluster -c 50 -n 50000 --pipeline 50.0k SET & GET (each 50%) operations took: 2.1728243827819824 seconds... 46023.047602201834 operations per second -- Running Simple benchmark with RedisCluster lib and cluster server python benchmarks/simple.py --host 127.0.0.1 --timeit -c 50 -n 50000 --pipeline 50.0k SET & GET (each 50%) operations took: 1.7181339263916016 seconds... 
58202.68051514381 operations per second

redis-py-cluster-2.0.0/docs/cluster-setup.rst000066400000000000000000000016051352661744600212450ustar00rootroot00000000000000

Redis cluster setup
===================

Manually
--------

- Redis cluster tutorial: http://redis.io/topics/cluster-tutorial
- Redis cluster specs: http://redis.io/topics/cluster-spec
- This video describes how to set up and use a redis cluster: http://vimeo.com/63672368 (This video is outdated but could serve as a good tutorial/example)

Docker
------

A fully functional docker image can be found at https://github.com/Grokzen/docker-redis-cluster

See the repo `README` for detailed instructions on how to set up and run it.

Vagrant
-------

A fully functional vagrant box can be found at https://github.com/72squared/vagrant-redis-cluste

See the repo `README` for detailed instructions on how to set up and run it.

Simple makefile
---------------

A simple makefile solution can be found at https://github.com/Grokzen/travis-redis-cluster

See the repo `README` for detailed instructions on how to set it up.

redis-py-cluster-2.0.0/docs/commands.rst000066400000000000000000000062351352661744600202310ustar00rootroot00000000000000

Implemented commands
====================

This describes all changes that RedisCluster has made to make commands work in a cluster environment. If a command is not listed here then the default implementation from `Redis` in the `redis-py` library is used.

Fanout Commands
---------------

The following commands will send the same request to all nodes in the cluster. Results are returned as a dict of (NodeID, Result) key/value pairs.

- bgrewriteaof
- bgsave
- client_getname
- client_kill
- client_list
- client_setname
- config_get
- config_resetstat
- config_rewrite
- config_set
- dbsize
- echo
- info
- lastsave
- ping
- save
- slowlog_get
- slowlog_len
- slowlog_reset
- time

The pubsub commands are sent to all nodes, and the resulting replies are merged together. They have an optional keyword argument `aggregate` which, when set to `False`, will return a dict of (NodeID, Result) key/value pairs instead of the merged result.

- pubsub_channels
- pubsub_numsub
- pubsub_numpat

This command will send the same request to all nodes in the cluster in sequence. Results are appended to a unified list.

- keys

The following commands will only be sent to the master nodes in the cluster. Results are returned as a dict of (NodeID, Command-Result) key/value pairs.

- flushall
- flushdb
- scan

This command will be sent to a random node in the cluster.

- publish

The following commands will be sent to the server that matches the first key.

- eval
- evalsha

The following commands will be sent to the master nodes in the cluster.

- script load - the result is the hash of the loaded script
- script flush - the result is `True` if the command succeeds on all master nodes, else `False`
- script exists - the result is an array of booleans. An entry is `True` only if the script exists on all the master nodes.

The following commands will be sent to the server that matches the specified key.

- hscan
- hscan_iter
- scan_iter
- sscan
- sscan_iter
- zscan
- zscan_iter

Blocked commands
----------------

The following commands are blocked from use, either because they do not work, there is no working implementation, or they are not safe to use within a cluster.
- bitop - Currently to hard to implement a solution in python space - client_setname - Not yet implemented - move - It is not possible to move a key from one db to another in cluster mode - restore - script_kill - Not yet implemented - sentinel - sentinel_get_master_addr_by_name - sentinel_master - sentinel_masters - sentinel_monitor - sentinel_remove - sentinel_sentinels - sentinel_set - sentinel_slaves - shutdown - slaveof - Cluster management should be done via redis-trib.rb manually - unwatch - Not yet implemented - watch - Not yet implemented Overridden methods ------------------ The following methods is overridden from Redis with a custom implementation. They can operate on keys that exists in different hashslots and require a client side implementation to work. - brpoplpus - mget - mset - msetnx - pfmerge - randomkey - rename - renamenx - rpoplpush - sdiff - sdiffstore - sinter - sinterstore - smove - sort - sunion - sunionstore - zinterstore - zunionstore redis-py-cluster-2.0.0/docs/conf.py000066400000000000000000000223721352661744600171770ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # redis-py-cluster documentation build configuration file, created by # sphinx-quickstart on Tue Mar 29 23:29:46 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'redis-py-cluster' copyright = u'2016, Johan Andersson' author = u'Johan Andersson' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'1.2.0' # The full version, including alpha/beta/rc tags. release = u'1.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. # " v documentation" by default. #html_title = u'redis-py-cluster v1.2.0' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. #html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
#html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'redis-py-clusterdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'redis-py-cluster.tex', u'redis-py-cluster Documentation', u'Johan Andersson', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'redis-py-cluster', u'redis-py-cluster Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'redis-py-cluster', u'redis-py-cluster Documentation', author, 'redis-py-cluster', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
#texinfo_no_detailmenu = False

redis-py-cluster-2.0.0/docs/disclaimer.rst000066400000000000000000000014271352661744600205440ustar00rootroot00000000000000

Disclaimer
==========

Both Redis cluster and redis-py-cluster are considered stable and production ready. But this depends on what you are going to use clustering for. In the simple use cases with SET/GET and other single key functions there are no issues. If you require multi key functionality or pipelines then you must be very careful when developing, because they work slightly differently from the normal redis server.

If you require advanced features like pubsub or scripting, this lib and redis do not handle those kinds of use cases very well. You either need to develop a custom solution yourself or use a non clustered redis server for that.

Finally, this lib itself is very stable and I know of at least 2 companies that use this in production with high loads and big cluster sizes.

redis-py-cluster-2.0.0/docs/index.rst000066400000000000000000000127001352661744600175330ustar00rootroot00000000000000

.. redis-py-cluster documentation master file, created by sphinx-quickstart on Tue Mar 29 23:29:46 2016.
   You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive.

Welcome to redis-py-cluster's documentation!
============================================

This project is a port of `redis-rb-cluster` by antirez, with a lot of added functionality. The original source can be found at https://github.com/antirez/redis-rb-cluster. The source code for this project is `available on github`_.

.. _available on github: http://github.com/grokzen/redis-py-cluster

Installation
------------

Latest stable release from pypi

.. code-block:: bash

    $ pip install redis-py-cluster

or from source code

.. code-block:: bash

    $ python setup.py install

Basic usage example
-------------------

Small sample script that shows how to get started with RedisCluster. It can also be found in the file `examples/basic.py`. Additional code examples of more advanced functionality can be found in the `examples/` folder in the source code git repo.

.. code-block:: python

    >>> from rediscluster import RedisCluster
    >>> # Requires at least one node for cluster discovery. Multiple nodes are recommended.
    >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]
    >>> # Note: See note on Python 3 for decode_responses behaviour
    >>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True)
    >>> rc.set("foo", "bar")
    True
    >>> print(rc.get("foo"))
    'bar'

.. note:: Python 3

    Since Python 3 changed to Unicode strings from Python 2's ASCII, the return type of *most* commands will be binary strings, unless the class is instantiated with the option ``decode_responses=True``. In this case, the responses will be Python 3 strings (Unicode).

    For the init argument `decode_responses`, when set to False, redis-py-cluster will not attempt to decode the responses it receives. In Python 3, this means the responses will be of type `bytes`. In Python 2, they will be native strings (`str`). If `decode_responses` is set to True, for Python 3 responses will be `str`, for Python 2 they will be `unicode`.

Library Dependencies
--------------------

It is always recommended to use the latest version of the dependencies of this project.

- Redis-py: 'redis>=3.0.0,<3.1.0' is required in this major version of this cluster lib.
- Optional Python: hiredis >= `0.2.0`. Older versions might work but are not tested.
- A working Redis cluster based on version `>=3.0.0` is required.
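To make the note on ``decode_responses`` above concrete, here is a minimal sketch of the difference between the two modes on Python 3 (the host/port values are placeholders for a local test cluster):

.. code-block:: python

    >>> from rediscluster import RedisCluster
    >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]
    >>> raw = RedisCluster(startup_nodes=startup_nodes)  # decode_responses defaults to False
    >>> decoded = RedisCluster(startup_nodes=startup_nodes, decode_responses=True)
    >>> raw.set("foo", "bar")
    True
    >>> raw.get("foo")       # bytes on Python 3
    b'bar'
    >>> decoded.get("foo")   # str on Python 3
    'bar'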
Supported python versions
-------------------------

Supported python versions follow the python versions specified by the upstream package `redis-py`, based on which major version(s) of it this library supports. If this library supports more than one major version line of `redis-py`, then the supported python versions must include the python versions supported by all of those major version lines.

- 2.7.x
- 3.4.1+ (See note)
- 3.5.x
- 3.6.x
- 3.7.x

.. note:: Python 3.4.0

    A segfault was found when running `redis-py` on python `3.4.0`, caused by a bug introduced into python itself in version `3.4.0`. Because of this, both `redis-py` and `redis-py-cluster` will not work when running with `3.4.0`. This lib has decided to block execution on `3.4.0` and you will get an exception when trying to import the code. The only solution is to use python `3.4.1` or some other higher version in the `3.4` series.

    When python `3.8.0` is released and added as a supported python version, python 3.4.x will be removed from the supported versions and this hard block will be removed from the source code.

Regarding duplicate package name on pypi
----------------------------------------

It has been found that the python module name that is used in this library (rediscluster) is already shared with a similar but older project.

This lib will `NOT` change the naming of the module to something else to prevent collisions between the libs. My reasoning for this is the following:

- Changing the namespace is a major task and probably should only be done in a complete rewrite of the lib, or if the lib had plans for a version 2.0.0 where this kind of backwards incompatibility could be introduced.
- This project is more up to date; the last merged PR in the other project was 3 years ago.
- This project aims to implement support for the cluster feature introduced in redis 3.0+. The other lib does not have that right now; it implements almost the same cluster solution, but with much more of it on the client side.
- The 2 libs are not compatible to run at the same time, even if the names did not collide. It is not recommended to run both in the same python interpreter.

An issue has been raised in each repository to track the problem.

redis-py-cluster: https://github.com/Grokzen/redis-py-cluster/issues/150

rediscluster: https://github.com/salimane/rediscluster-py/issues/11

The Usage Guide
---------------

.. _cluster_docs:

.. toctree::
    :maxdepth: 2
    :glob:

    commands
    limitations-and-differences
    pipelines
    pubsub
    readonly-mode

.. _setup_and_performance:

.. toctree::
    :maxdepth: 2
    :glob:

    cluster-setup
    benchmarks

The Community Guide
--------------------

.. _community-guide:

.. toctree::
    :maxdepth: 2
    :glob:

    project-status
    testing
    upgrading
    release-notes
    authors
    license
    disclaimer

redis-py-cluster-2.0.0/docs/license.rst000066400000000000000000000002641352661744600200500ustar00rootroot00000000000000

Licensing
---------

Copyright (c) 2013-2019 Johan Andersson

MIT (See docs/License.txt file)

The license should be the same as redis-py (https://github.com/andymccurdy/redis-py)

redis-py-cluster-2.0.0/docs/limitations-and-differences.rst000066400000000000000000000033601352661744600237750ustar00rootroot00000000000000

Limitations and differences
===========================

This compares against `redis-py`. There are a lot of differences that have to be taken into consideration when using redis cluster.
Any method that can operate on multiple keys has to be reimplemented in the client, and in some cases that is not possible to do. In general, any method that is overridden in RedisCluster has lost the ability to be atomic.

Pipelines do not work the same way in a cluster. In `Redis` a pipeline batches all commands so that they can be executed at the same time when requested. But with RedisCluster, pipelines will send each command directly to the server when it is called, but will still store the result internally and return the same data from .execute(). This is done so that the code still behaves like a pipeline and no code will break. A better solution will be implemented in the future.

A lot of methods will behave very differently when using RedisCluster. Some methods send the same request to all servers and return the result in another format than `Redis` does. Some methods are blocked because they do not work, are not implemented, or are dangerous to use in redis cluster.

Some of the commands are only partially supported when using RedisCluster. The commands ``zinterstore`` and ``zunionstore`` are only supported if all the keys map to the same key slot in the cluster. This can be achieved by namespacing related keys with a prefix followed by a bracketed common key. Example:

.. code-block:: python

    r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'])

This corresponds to how redis behaves in cluster mode. Eventually these commands will likely be more fully supported by implementing the logic in the client library, at the expense of atomicity and performance.

redis-py-cluster-2.0.0/docs/pipelines.rst000066400000000000000000000515061352661744600204230ustar00rootroot00000000000000

Pipelines
=========

How pipelining works
--------------------

In redis-py-cluster, pipelining is all about trying to achieve greater network efficiency. Transaction support is disabled in redis-py-cluster. Use pipelines to avoid extra network round-trips, not to ensure atomicity.

Just like in `redis-py`, `redis-py-cluster` queues up all the commands inside the client until execute is called. But, once execute is called, `redis-py-cluster` internals work slightly differently. It still packs the commands to efficiently transmit multiple commands across the network. But since different keys may be mapped to different nodes, redis-py-cluster must first map each key to the expected node. It then packs all the commands destined for each node in the cluster into its own packed sequence of commands. It uses the redis-py library to communicate with each node in the cluster.

Ideally all the commands should be sent to each node in the cluster in parallel so that all the commands can be processed as fast as possible. We do this by first writing all of the commands to the sockets sequentially before reading any of the responses. This allows us to parallelize the network i/o without the overhead of managing python threads.

In previous versions of the library there were some bugs associated with pipelining operations. In an effort to simplify the logic and lessen the likelihood of bugs, if we get back connection errors, MOVED errors, ASK errors or any other error that can safely be retried, we fall back to sending these remaining commands sequentially to each individual node just as we would in a normal redis call. We still buffer the results inside the pipeline response so there will be no change in client behavior.
During normal cluster operations, pipelined commands should work nearly as efficiently as pipelined commands to a single-instance redis. When there is a disruption to the cluster topology, like when keys are being resharded, or when a slave takes over for a master, there will be a slight loss of network efficiency. Commands that are rejected by the server are tried one at a time as we rebuild the slot mappings. Once the slots table is rebuilt correctly (usually in a second or so), the client resumes efficient networking behavior. We felt it was more important to prioritize correctness of behavior and reliable error handling over networking efficiency for the rare cases where the cluster topology is in flux.

Connection Error handling
-------------------------

The other way pipelines in `redis-py-cluster` differ from `redis-py` is in error handling and retries. With the normal `redis-py` client, if you hit a connection error during a pipeline command it raises the error right there. But we expect redis-cluster to be more resilient to failures.

If you hit a connection problem with one of the nodes in the cluster, most likely a stand-by slave will take over for the down master pretty quickly. In this case, we send the commands bound for that particular node to another random node. The other random node will not just blindly accept these commands. It only accepts them if the keys referenced in those commands actually map to that node in the cluster configuration. Most likely it will respond with a `MOVED` error telling the client the new master for those commands. Our code handles these `MOVED` errors according to the redis cluster specification and re-issues the commands to the correct server transparently inside of the `pipeline.execute()` method. You can disable this behavior if you'd like as well.

ASKED and MOVED errors
----------------------

The other tricky part of the redis-cluster specification is that if any command response comes back with an `ASK` or `MOVED` error, the command is to be retried against the specified node.

Previous versions of `redis-py-cluster` treated `ASKED` and `MOVED` errors the same, but they really need to be handled differently. A `MOVED` error means that the client can safely update its own representation of the slots table to point to a new node for all future commands bound for that slot.

An `ASK` error means the slot is only partially migrated and that the client can only successfully issue that command to the new server if it prefixes the request with an `ASKING` command first. This lets the new node taking over that slot know that the original server said it was okay to run that command for the given key against the new node even though the slot is not yet completely migrated. Our current implementation now handles this case correctly.

The philosophy on pipelines
---------------------------

After playing around with pipelines and thinking about possible solutions that could be used in a cluster setting, this document will describe how pipelines work, and the strengths and weaknesses of the implementation that was chosen.

Why can't we reuse the pipeline code in `redis-py`? In short, it is almost the same reason why code from the normal redis client can't be reused in a cluster environment, and that is because of the slots system. Redis cluster consists of a number of slots that are distributed across a number of servers, and each key belongs to one of these slots.
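To make the slot mapping more concrete, the Redis Cluster specification assigns every key to one of 16384 slots via CRC16 of the key (or of its hash tag, the part between ``{`` and ``}``). The sketch below uses the node manager's ``keyslot()`` helper, assuming a cluster on ``127.0.0.1:7000`` and example key names.

.. code-block:: python

    from rediscluster import RedisCluster

    rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}], decode_responses=True)

    # Slot for a plain key: CRC16(key) % 16384
    print(rc.connection_pool.nodes.keyslot("foo"))  # e.g. 12182

    # Keys that share a hash tag map to the same slot, so they can
    # always be handled by the same sub pipeline / node.
    print(rc.connection_pool.nodes.keyslot("a{user1}"))
    print(rc.connection_pool.nodes.keyslot("b{user1}"))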
In the normal pipeline implementation in `redis-py` we can batch all the commands and send them to the server at once, thus speeding up the code by not issuing many requests one after another. We can say that we have a defined and guaranteed execution order because of this.

One problem that appears when you want to do pipelines in a cluster environment is that you can't have guaranteed execution order in the same way as a single server pipeline. The problem is that because you can queue a command for any key, in most cases we will end up having to talk to 2 or more nodes in the cluster to execute the pipeline. The problem with that is that there is no single place/node/way to send the pipeline and have redis sort everything out by itself via some internal mechanism. Because of that, when we build a pipeline for a cluster we have to build several smaller pipelines that we send to the designated nodes in the cluster. When the pipeline is executed in the client, each key is checked for what slot it should be sent to and the sub pipelines are built up based on that information. One thing to note here is that there will be a partially correct execution order if you look over the entire cluster, because for each sub pipeline the ordering will be correct. It can also be argued that the correct execution order is applied/valid for each slot in the cluster.

The next thing to take into consideration is what commands should be available and which should be blocked/locked. In most cases and in almost all solutions, multi key commands have to be blocked hard from being executed inside a pipeline. This would only be possible if you have a pipeline implementation that executes each command immediately as it is queued up. That solution would only give the interface of working like a pipeline to ensure old code will still work, but it would not give any benefits or advantages other than that all commands would work and old code would keep running. In the solution for this lib, multi key commands are blocked hard and will probably not be enabled in pipelines. If you really need to use them, you need to execute them through the normal cluster client if they are implemented and work there.

Why can't multi key commands work? In short, again, it is because the keys can live in different slots on different nodes in the cluster. It is possible in theory to have any command work in a cluster, but only if the keys operated on belong to the same cluster slot. This lib has decided that currently no serious support for that will be attempted. Examples of commands that do not work are `MGET`, `MSET`, `MOVE`.

One good thing that comes out of blocking multi key commands is that correct execution order is less of a problem, and as long as it applies to each slot in the cluster we should be fine.

Consider the following example. Create a pipeline and issue 6 commands `A`, `B`, `C`, `D`, `E`, `F` and then execute it. The pipeline is calculated and 2 sub pipelines are created, with `A`, `C`, `D`, `F` in the first and `B`, `E` in the second. Both pipelines are then sent to their nodes in the cluster and a response is sent back. From the first node we get `[True, MovedException(12345), MovedException(12345), True]` and from the second node [`True`, `True`]. After this response is parsed, we see that 2 commands in the first pipeline did not work and must be sent to another node. This case happens if the client slots cache is wrong because a slot was migrated to another node in the cluster.
After parsing the response we then build a third pipeline object with commands [`C`, `D`] aimed at the second node. The third object is executed and passes, and from the client perspective the entire pipeline was executed.

If we look back at the order we executed the commands, we get `[A, F]` for the first node and `[B, E, C, D]` for the second node. At first glance this looks like it is out of order because command `E` is executed before `C` & `D`. Why does this not matter? Because no multi key operations can be done in a pipeline, we only have to care that the execution order is correct for each slot, and in this case it was, because `B` & `E` belong to the same slot and `C` & `D` belong to the same slot. There should be no possible way to corrupt any data between slots if multi key commands are blocked by the code.

What is good with this pipeline solution? First, we can actually have a pipeline solution that will work in most cases with very few commands blocked (only multi key commands). Secondly, we can run it in parallel to increase the performance of the pipeline even further, making the benefits even greater.

Packing Commands
----------------

When issuing only a single command, there is only one network round trip to be made. But what if you issue 100 pipelined commands? In a single-instance redis configuration, you still only need to make one network hop. The commands are packed into a single request and the server responds with all the data for those requests in a single response. But with redis cluster, those keys could be spread out over many different nodes.

The client is responsible for figuring out which commands map to which nodes. Let's say for example that your 100 pipelined commands need to route to 3 different nodes. The first thing the client does is break out the commands that go to each node, so it only has 3 network requests to make instead of 100.

Parallel execution of pipeline
------------------------------

In older versions of `redis-py-cluster`, there was a thread implementation that helped to increase the performance of running pipelines by running the connections and the execution of all commands to all nodes in the pipeline in parallel. This implementation was later removed in favor of a much simpler and faster implementation.

In this new implementation we execute everything in the same thread, but we do all the writing to all sockets in order, to each different server, and then start to wait for them in sequence until all of them are complete. There is no real need to run them in parallel, since we would still have to wait for a thread join of all parallel executions before the code could continue, so we can just as well wait in sequence for all of them to complete. This is not the absolute fastest implementation, but it is much simpler to implement and maintain and causes fewer issues because there are no threads or other parallel implementations that add overhead and complexity to the method.

This feature is implemented by default and will be used in all pipeline requests.

Transactions and WATCH
----------------------

How about support for transactions and WATCH in pipelines? If we look at the entire pipeline across all nodes in the cluster, there is no possible way to have a complete transaction across all nodes, because if we need to issue commands to 3 servers, each server is handled on its own and there is no way to tell the other nodes to abort a transaction if only one of the nodes fails but not the others.

A possible solution for that could be to implement a 2 step commit process.
The 2 steps would consist of building 2 batches of commands for each node, where the first batch would consist of validating the state of each slot that the pipeline wants to operate on. If any of the slots are migrating or moved, then the client can correct its slots cache and issue a more correct pipeline batch. The second step would be to issue the actual commands, and the data would be committed to redis.

The big problem with this is that 99% of the time this would work really well if you have a very stable cluster with no migrations/resharding/servers down. But there can be times where a slot has begun migration in between the 2 steps of the pipeline, and that would cause a race condition where the client thinks it has corrected the pipeline and wants to commit the data, but when it does, it will still fail.

Why won't `MULTI/EXEC` support work in a cluster environment? There is some test code in the second listing of the `MULTI/EXEC cluster test code` section of this document that tests if `MULTI/EXEC` is possible to use in a cluster pipeline. The test shows a huge problem when errors occur. If we wrap `MULTI/EXEC` in a packed set of commands, then if a slot is migrating we will not get a good error that we can parse and use. Currently it will only report `True` or `False`, so we can narrow down what command failed but not why it failed. This might work really well if used on a non clustered node, because it does not have to take care of `ASK` or `MOVED` errors. But for a cluster we need to know what cluster error occurred so the correct action to fix the problem can be taken. Since there is more than 1 error to take care of, it is not possible to take action based on just `True` or `False`.

Because of this problem with error handling, `MULTI/EXEC` is blocked hard in the code from being used in a pipeline, because the current implementation can't handle the errors.

In theory it could be possible to design a pipeline implementation that can handle this case by trying to determine by itself what it should do with the error, either by asking the cluster about the current state of the slot after a `False` value was found in the response, or by just defaulting to `MOVED` error handling and hoping for the best. The problem is that this is not 100% guaranteed to work and can easily cause problems when the wrong action is taken on the response.

Currently `WATCH` requires more study to determine whether it is possible to use or not, but since it is tied into the `MULTI/EXEC` pattern it probably will not be supported for now.

MULTI/EXEC cluster test code
----------------------------

This code does NOT wrap `MULTI/EXEC` around the commands when packed

.. code-block:: python

    >>> from rediscluster import RedisCluster as s
    >>> r = s(startup_nodes=[{"host": "127.0.0.1", "port": "7002"}])
    >>> # Simulate that a slot is migrating to another node
    >>> r.connection_pool.nodes.slots[14226] = {'host': '127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001'}
    >>> p = r.pipeline()
    >>> p.command_stack = []
    >>> p.command_stack.append((["SET", "ert", "tre"], {}))
    >>> p.command_stack.append((["SET", "wer", "rew"], {}))
    >>> p.execute()
    ClusterConnection [True, ResponseError('MOVED 14226 127.0.0.1:7002',)]
    ClusterConnection [True]

This code DOES wrap `MULTI/EXEC` around the commands when packed

.. code-block:: python

    >>> from rediscluster import RedisCluster as s
    >>> r = s(startup_nodes=[{"host": "127.0.0.1", "port": "7002"}])
    >>> # Simulate that a slot is migrating to another node
    >>> r.connection_pool.nodes.slots[14226] = {'host': '127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001'}
    >>> p = r.pipeline()
    >>> p.command_stack = []
    >>> p.command_stack.append((["SET", "ert", "tre"], {}))
    >>> p.command_stack.append((["SET", "wer", "rew"], {}))
    >>> p.execute()
    ClusterConnection [True, False]

Different pipeline solutions
----------------------------

This section describes different types of pipeline solutions. It lists their main benefits and weaknesses.

.. note:: This section is mostly random notes and thoughts and not that well written and cleaned up right now. It will be done at some point in the future.

Suggestion one
**************

A simple yet sequential pipeline. This solution acts more like an interface for the already existing pipeline implementation and only provides a simple backwards compatible interface to ensure that code that exists will still work without any major modifications. The good thing with this implementation is that because all commands are run in sequence, it will handle `MOVED` or `ASK` redirections very well and without any problems. The major downside to this solution is that no command is ever batched and run in parallel, and thus you do not get any major performance boost from this approach. Another plus is that execution order is preserved across the entire cluster, but a major downside is that the commands are no longer atomic on the cluster scale because they are sent as multiple commands to different nodes.

**Good**

- Sequential execution of the entire pipeline
- Easy `ASK` or `MOVED` handling

**Bad**

- No batching of commands aka. no execution speedup

Suggestion two
**************

The current pipeline implementation. This implementation is rather good and works well because it combines the existing pipeline interface and functionality, and it also provides a basic handling of `ASK` or `MOVED` errors inside the client. One major downside to this is that execution order is not preserved across the cluster. The execution order is somewhat broken if you look at the entire cluster level, because commands can be split so that cmd1, cmd3, cmd5 get sent to one server and cmd2, cmd4 get sent to another server. The order is then broken globally, but locally for each server it is preserved and maintained correctly. On the other hand, there can't be any commands that affect different hashslots within the same command, so maybe it really doesn't matter if the execution order is not correct globally, because for each slot/key the order is valid. There might be some issues with rebuilding the correct response ordering from the scattered data, because each command might be in a different sub pipeline. But I think that our current code still handles this correctly. I think I have to figure out some weird case where the execution order actually matters. There might also be some issues with the nonsupported mget/mset commands that actually perform different sub commands than it currently supports.
**Good**

- Sequential execution per node

**Bad**

- Non sequential execution of the entire pipeline
- Medium difficulty `ASK` or `MOVED` handling

Suggestion three
****************

There is an even simpler form of pipeline that can be made where all commands are supported as long as they map to the same hashslot, because REDIS supports that mode of operation. The good thing with this is that since all keys must belong to the same slot, there will be very few `ASK` or `MOVED` errors, and if they happen they will be very easy to handle because the entire pipeline is more or less atomic: you talk to the same server and only 1 server. There can't be any multiple server communication happening.

**Good**

- Super simple `ASK` or `MOVED` handling
- Sequential execution per slot and through the entire pipeline

**Bad**

- Single slot per pipeline

Suggestion four
***************

One other solution is the 2 step commit solution, where you send 2 batches of commands for each server. The first batch should somehow establish that each keyslot is in the correct state and able to handle the data. After the client has received OK from all nodes that all data slots are good to use, it will actually send the real pipeline with all data and commands. The big problem with this approach is that there is a gap between the checking of the slots and the actual sending of the data where things can happen to the already established slots setup. At the same time, there is no possibility of merging these 2 steps, because if step 2 is automatically run when step 1 is OK, then the pipeline for the first node that fails will fail, but for the other nodes it will succeed when it should not, because if one command gets an `ASK` or `MOVED` redirection then all pipeline objects must be rebuilt to match the new specs/setup and then reissued by the client. The major advantage of this solution is that if you have total control of the redis server and do controlled upgrades when no clients are talking to the server, then it can actually work really well, because there is no possibility that `ASK` or `MOVED` will be triggered by migrations in between the 2 batches.

**Good**

- Still rather safe because of the 2 step commit solution
- Handles `ASK` or `MOVED` before committing the data

**Bad**

- Big possibility of race conditions that can cause problems

redis-py-cluster-2.0.0/docs/project-status.rst000066400000000000000000000016731352661744600214200ustar00rootroot00000000000000Project status
==============

If you have a problem with the code or general questions about this lib, you can ping me inside the gitter channel that you can find here https://gitter.im/Grokzen/redis-py-cluster and I will help you out with problems or usage of this lib.

As of release `1.0.0` this project is considered stable and usable in production. If you are going to use redis cluster in your project, you should read up on all the documentation that you can find at the bottom of this Readme file. It contains usage examples and descriptions of what is and what is not implemented. It also describes how and why things work the way they do in this client.

On the topic of porting/moving this code into `redis-py`, there is currently work over here https://github.com/andymccurdy/redis-py/pull/604 that will bring cluster support based on this code. But my suggestion is that until that work is completed, you should use this lib.
redis-py-cluster-2.0.0/docs/pubsub.rst000066400000000000000000000067271352661744600177340ustar00rootroot00000000000000Pubsub
======

After testing pubsub in cluster mode, one big problem was discovered with the `PUBLISH` command.

According to the current official redis documentation on `PUBLISH`::

    Integer reply: the number of clients that received the message.

It was initially assumed that if we had clients connected to different nodes in the cluster, it would still report back the correct number of clients that received the message. However, after some testing of this command it was discovered that it only reports the number of clients that have subscribed on the same server the `PUBLISH` command was executed on.

Because of this, any functionality that relies on an exact and correct number of clients that listen/subscribed to a specific channel will be broken or behave wrong.

Currently the only known workarounds are to:

- Ignore the returned value
- Have all clients talk to the same server
- Use a non clustered redis server for pubsub operations

Discussion on this topic can be found here: https://groups.google.com/forum/?hl=sv#!topic/redis-db/BlwSOYNBUl8

Scalability issues
------------------

The following part is from this discussion https://groups.google.com/forum/?hl=sv#!topic/redis-db/B0_fvfDWLGM and it describes the scalability issue that pubsub has, and the performance that goes with it, when used in a cluster environment.

According to [1] and [2], PubSub works by broadcasting every publish to every other Redis Cluster node. This limits the PubSub throughput to the bisection bandwidth of the underlying network infrastructure divided by the number of nodes times the message size. So if a typical message is 1 KB, the cluster has 10 nodes and bandwidth is 1 Gbit/s, throughput is already limited to 12.5K RPS. If we increase the message size to 5 KB and the number of nodes to 50, we only get 500 RPS, much less than a single Redis instance could service (>100K RPS), while putting maximum pressure on the network. PubSub throughput thus scales linearly with the cluster size, but in the negative direction!

How pubsub works in RedisCluster
--------------------------------

In release `1.2.0` the pubsub code was reworked to work like this.

For `PUBLISH` and `SUBSCRIBE` commands:

- The channel name is hashed and the keyslot is determined.
- Determine the node that handles the keyslot.
- Send the command to the node.

The old solution was that all pubsub connections would talk to the same node all the time. This would ensure that the commands would work. This new solution is probably future safe and will probably resemble the solution used when `redis` fixes the scalability issues.

Known limitations with pubsub
-----------------------------

Pattern subscribe and publish do not work properly, because if we hash a pattern like `fo*` we will get a keyslot for that string, but there is an endless number of possible channel names based on that pattern that we can't know in advance. This feature is not blocked, but the commands are not recommended for use right now.

The implemented solution will only work if other clients use/adopt the same behaviour. If some other client behaves differently, there might be problems with `PUBLISH` and `SUBSCRIBE` commands behaving wrong.

Other solutions
---------------

The simplest solution is to have a separate non clustered redis instance and use a regular `Redis` client against it for your pubsub code.
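A hedged sketch of that setup is shown below: the cluster client handles normal keyspace traffic while a plain `redis-py` client (assumed here to be reachable on ``127.0.0.1:6379``) handles pubsub.

.. code-block:: python

    import redis
    from rediscluster import RedisCluster

    # Cluster client for normal keyspace commands
    rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}], decode_responses=True)

    # Separate non clustered instance dedicated to pubsub
    r = redis.Redis(host="127.0.0.1", port=6379, decode_responses=True)

    p = r.pubsub()
    p.subscribe("my-channel")
    r.publish("my-channel", "hello")

    # The first message returned is normally the subscribe confirmation,
    # followed by the published message on later polls.
    print(p.get_message())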
It is not recommended to use pubsub until `redis` fixes the implementation in the server itself. redis-py-cluster-2.0.0/docs/readonly-mode.rst000066400000000000000000000040071352661744600211640ustar00rootroot00000000000000Readonly mode ============= By default, Redis Cluster always returns MOVE redirection response on accessing slave node. You can overcome this limitation [for scaling read with READONLY mode](http://redis.io/topics/cluster-spec#scaling-reads-using-slave-nodes). redis-py-cluster also implements this mode. You can access slave by passing `readonly_mode=True` to RedisCluster (or RedisCluster) constructor. .. code-block:: python >>> from rediscluster import RedisCluster >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] >>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) >>> rc.set("foo16706", "bar") >>> rc.set("foo81", "foo") True >>> rc_readonly = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) >>> rc_readonly.get("foo16706") u'bar' >>> rc_readonly.get("foo81") u'foo' We can use pipeline via `readonly_mode=True` object. .. code-block:: python >>> with rc_readonly.pipeline() as readonly_pipe: ... readonly_pipe.get('foo81') ... readonly_pipe.get('foo16706') ... readonly_pipe.execute() ... [u'foo', u'bar'] But this mode has some downside or limitations. - It is possible that you cannot get the latest data from READONLY mode enabled object because Redis implements asynchronous replication. - **You MUST NOT use SET related operation with READONLY mode enabled object**, otherwise you can possibly get 'Too many Cluster redirections' error because we choose master and its slave nodes randomly. - You should use get related stuff only. - Ditto with pipeline, otherwise you can get 'Command # X (XXXX) of pipeline: MOVED' error. .. code-block:: python >>> rc_readonly = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) >>> # NO: This works in almost case, but possibly emits Too many Cluster redirections error... >>> rc_readonly.set('foo', 'bar') >>> # OK: You should always use get related stuff... >>> rc_readonly.get('foo') redis-py-cluster-2.0.0/docs/release-notes.rst000066400000000000000000000265741352661744600212100ustar00rootroot00000000000000Release Notes ============= 2.0.0 (Aug 12, 2019) Specific changes to redis-py-cluster is mentioned below here. * Update entire code base to now support all redis-py version in the 3.0.x version line. Any future redis-py version will be supported at a later time. * Major update to all tests to mirror the code of the same tests from redis-py * Dropped support for the 2.10.6 redis-py release. * Add pythoncodestyle lint validation check to travis-ci runs to check for proper linting before accepting PR:s * Class StrictRedisCluster was renamed to RedisCluster * Class StrictRedis has been removed to mirror upstream class structure * Class StrictClusterPipeline was renamed to ClusterPipeline * Fixed travis-ci tests not running properly on python 3.7 * Fixed documentation regarding threads in pipelines * Update lit of command callbacks and parsers. Added in "CLIENT ID" * Removed custom implementation of SORT and revert back to use same-slot mechanism for that command. * Added better exception message to get_master_node_by_slot command to help the user understand the error. 
* Improved the exception object message parsing when running on python3 1.3.6 (Nov 16, 2018) -------------------- * Pin upstream redis-py package to release 2.10.6 to avoid issues with incompatible version 3.0.0 1.3.5 (July 22, 2018) --------------------- * Add Redis 4 compatability fix to CLUSTER NODES command (See issue #217) * Fixed bug with command "CLUSTER GETKEYSINSLOT" that was throwing exceptions * Added new methods cluster_get_keys_in_slot() to client * Fixed bug with `StrictRedisCluster.from_url` that was ignoring the `readonly_mode` parameter * NodeManager will now ignore nodes showing cluster errors when initializing the cluster * Fix bug where RedisCluster wouldn't refresh the cluster table when executing commands on specific nodes * Add redis 5.0 to travis-ci tests * Change default redis version from 3.0.7 to 4.0.10 * Increase accepted ranges of dependencies specefied in dev-requirements.txt * Several major and minor documentation updates and tweaks * Add example script "from_url_password_protected.py" * command "CLUSTER GETKEYSINSLOT" is now returned as a list and not int * Improve support for ssl connections * Retry on Timeout errors when doing cluster discovery * Added new error class "MasterDownError" * Updated requirements for dependency of redis-py to latest version 1.3.4 (Mar 5, 2017) ------------------- * Package is now built as a wheel and source package when releases is built. * Fixed issues with some key types in `NodeManager.keyslot()`. * Add support for `PUBSUB` subcommands `CHANNELS`, `NUMSUB [arg] [args...]` and `NUMPAT`. * Add method `set_result_callback(command, callback)` allowing the default reply callbacks to be changed, in the same way `set_response_callback(command, callback)` inherited from Redis-Py does for responses. * Node manager now honors defined max_connections variable so connections that is emited from that class uses the same variable. * Fixed a bug in cluster detection when running on python 3.x and decode_responses=False was used. Data back from redis for cluster structure is now converted no matter what the data you want to set/get later is using. * Add SSLClusterConnection for connecting over TLS/SSL to Redis Cluster * Add new option to make the nodemanager to follow the cluster when nodes move around by avoiding to query the original list of startup nodes that was provided when the client object was first created. This could make the client handle drifting clusters on for example AWS easier but there is a higher risk of the client talking to the wrong group of nodes during split-brain event if the cluster is not consistent. This feature is EXPERIMENTAL and use it with care. 1.3.3 (Dec 15, 2016) -------------------- * Remove print statement that was faulty commited into release 1.3.2 that case logs to fill up with unwanted data. 1.3.2 (Nov 27, 2016) -------------------- * Fix a bug where from_url was not possible to use without passing in additional variables. Now it works as the same method from redis-py. Note that the same rules that is currently in place for passing ip addresses/dns names into startup_nodes variable apply the same way through the from_url method. * Added options to skip full coverage check. This flag is useful when the CONFIG redis command is disabled by the server. * Fixed a bug where method *CLUSTER SLOTS* would break in newer redis versions where node id is included in the reponse. Method is not compatible with both old and new redis versions. 
1.3.1 (Oct 13, 2016) -------------------- * Rebuilt broken method scan_iter. Previous tests was to small to detect the problem but is not corrected to work on a bigger dataset during the test of that method. (korvus81, Grokzen, RedWhiteMiko) * Errors in pipeline that should be retried, like connection errors, moved, errors and ask errors now fall back to single operation logic in StrictRedisCluster.execute_command. (72squared). * Moved reinitialize_steps and counter into nodemanager so it can be correctly counted across pipeline operations (72squared). 1.3.0 (Sep 11, 2016) -------------------- * Removed RedisClusterMgt class and file * Fixed a bug when using pipelines with RedisCluster class (Ozahata) * Bump redis-server during travis tests to 3.0.7 * Added docs about same module name in another python redis cluster project. * Fix a bug when a connection was to be tracked for a node but the node either do not yet exists or was removed because of resharding was done in another thread. (ashishbaghudana) * Fixed a bug with "CLUSTER ..." commands when a node_id argument was needed and the return type was supposed to be converted to bool with bool_ok in redis._compat. * Add back gitter chat room link * Add new client commands - cluster_reset_all_nodes * Command cluster_delslots now determines what cluster shard each slot is on and sends each slot deletion command to the correct node. Command have changed argument spec (Read Upgrading.rst for details) * Fixed a bug when hashing the key it if was a python 3 byte string and it would cause it to route to wrong slot in the cluster (fossilet, Grokzen) * Fixed a bug when reinitialize the nodemanager it would use the old nodes_cache instead of the new one that was just parsed (monklof) 1.2.0 (Apr 09, 2016) -------------------- * Drop maintained support for python 3.2. * Remove Vagrant file in favor for repo maintained by 72squared * Add Support for password protected cluster (etng) * Removed assertion from code (gmolight) * Fixed a bug where a regular connection pool was allocated with each StrictRedisCluster instance. * Rework pfcount to now work as expected when all arguments points to same hashslot * New code and important changes from redis-py 2.10.5 have been added to the codebase. * Removed the need for threads inside of pipeline. We write the packed commands all nodes before reading the responses which gives us even better performance than threads, especially as we add more nodes to the cluster. * Allow passing in a custom connection pool * Provide default max_connections value for ClusterConnectionPool *(2**31)* * Travis now tests both redis 3.0.x and 3.2.x * Add simple ptpdb debug script to make it easier to test the client * Fix a bug in sdiffstore (mt3925) * Fix a bug with scan_iter where duplicate keys would be returned during itteration * Implement all "CLUSTER ..." commands as methods in the client class * Client now follows the service side setting 'cluster-require-full-coverage=yes/no' (baranbartu) * Change the pubsub implementation (PUBLISH/SUBSCRIBE commands) from using one single node to now determine the hashslot for the channel name and use that to connect to a node in the cluster. Other clients that do not use this pattern will not be fully compatible with this client. Known limitations is pattern subscription that do not work properly because a pattern can't know all the possible channel names in advance. * Convert all docs to ReadTheDocs * Rework connection pool logic to be more similar to redis-py. 
This also fixes an issue with pubsub and that connections was never release back to the pool of available connections. 1.1.0 (Oct 27, 2015) ------------------- * Refactored exception handling and exception classes. * Added READONLY mode support, scales reads using slave nodes. * Fix __repr__ for ClusterConnectionPool and ClusterReadOnlyConnectionPool * Add max_connections_per_node parameter to ClusterConnectionPool so that max_connections parameter is calculated per-node rather than across the whole cluster. * Improve thread safty of get_connection_by_slot and get_connection_by_node methods (iandyh) * Improved error handling when sending commands to all nodes, e.g. info. Now the connection takes retry_on_timeout as an option and retry once when there is a timeout. (iandyh) * Added support for SCRIPT LOAD, SCRIPT FLUSH, SCRIPT EXISTS and EVALSHA commands. (alisaifee) * Improve thread safety to avoid exceptions when running one client object inside multiple threads and doing resharding of the cluster at the same time. * Fix ASKING error handling so now it really sends ASKING to next node during a reshard operation. This improvement was also made to pipelined commands. * Improved thread safety in pipelined commands, along better explanation of the logic inside pipelining with code comments. 1.0.0 (Jun 10, 2015) ------------------- * No change to anything just a bump to 1.0.0 because the lib is now considered stable/production ready. 0.3.0 (Jun 9, 2015) ------------------- * simple benchmark now uses docopt for cli parsing * New make target to run some benchmarks 'make benchmark' * simple benchmark now support pipelines tests * Renamed RedisCluster --> StrictRedisCluster * Implement backwards compatible redis.Redis class in cluster mode. It was named RedisCluster and everyone updating from 0.2.0 to 0.3.0 should consult docs/Upgrading.md for instructions how to change your code. * Added comprehensive documentation regarding pipelines * Meta retrieval commands(slots, nodes, info) for Redis Cluster. (iandyh) 0.2.0 (Dec 26, 2014) ------------------- * Moved pipeline code into new file. * Code now uses a proper cluster connection pool class that handles all nodes and connections similar to how redis-py do. * Better support for pubsub. All clients will now talk to the same server because pubsub commands do not work reliably if it talks to a random server in the cluster. * Better result callbacks and node routing support. No more ugly decorators. * Fix keyslot command when using non ascii characters. * Add bitpos support, redis-py 2.10.2 or higher required. * Fixed a bug where vagrant users could not build the package via shared folder. * Better support for CLUSTERDOWN error. (Neuront) * Parallel pipeline execution using threads. (72squared) * Added vagrant support for testing and development. (72squared) * Improve stability of client during resharding operations (72squared) 0.1.0 (Sep 29, 2014) ------------------- * Initial release * First release uploaded to pypi redis-py-cluster-2.0.0/docs/scripting.rst000066400000000000000000000020611352661744600204250ustar00rootroot00000000000000# Scripting support Scripting support is limited to scripts that operate on keys in the same key slot. If a script is executed via `evalsha`, `eval` or by calling the callable returned by `register_script` and the keys passed as arguments do not map to the same key slot, a `RedisClusterException` will be thrown. 
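As a sketch of the `register_script` path (the key names are examples; both keys share the `{Foo}` hash tag so they map to the same key slot):

```python
cluster = RedisCluster('localhost', 7000)

multiply = cluster.register_script("""
return redis.call('GET', KEYS[1]) * redis.call('GET', KEYS[2])
""")

cluster.set("A{Foo}", 2)
cluster.set("B{Foo}", 3)

# both keys hash to the same slot, so this is allowed
multiply(keys=["A{Foo}", "B{Foo}"])

# mixing hash tags would raise RedisClusterException
# multiply(keys=["A{Foo}", "B{Bar}"])
```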
It is however, possible to query a key within the script, that is not passed as an argument of `eval`, `evalsha`. In this scenarios it is not possible to detect the error early and redis itself will raise an error which will be percolated to the user. For example: ```python cluster = RedisCluster('localhost', 7000) script = """ return redis.call('GET', KEYS[1]) * redis.call('GET', ARGV[1]) """ # this will succeed cluster.eval(script, 1, "A{Foo}", "A{Foo}") # this will fail as "A{Foo}" and "A{Bar}" are on different key slots. cluster.eval(script, 1, "A{Foo}", "A{Bar}") ``` ## Unsupported operations - The `SCRIPT KILL` command is not yet implemented. - Scripting in the context of a pipeline is not yet implemented. redis-py-cluster-2.0.0/docs/testing.rst000066400000000000000000000007221352661744600201020ustar00rootroot00000000000000Testing ======= All tests are currently built around a 6 redis server cluster setup (3 masters + 3 slaves). One server must be using port 7000 for redis cluster discovery. The easiest way to setup a cluster is to use either a Docker or Vagrant. They are both described in [Setup a redis cluster. Manually, Docker & Vagrant](docs/Cluster_Setup.md). Tox --- To run all tests in all supported environments with `tox` read this [Tox multienv testing](docs/Tox.md) redis-py-cluster-2.0.0/docs/tox.rst000066400000000000000000000010601352661744600172330ustar00rootroot00000000000000# Tox - Multi environment testing Tox is the easiest way to run all tests because it will manage all dependencies and run the correct test command for you. TravisCI will use tox to run tests on all supported python & hiredis versions. Install tox with `pip install tox` To run all environments you need all supported python versions installed on your machine. (See supported python versions list) and you also need the python-dev package for all python versions to build hiredis. To run a specific python version use either `tox -e py27` or `tox -e py34` redis-py-cluster-2.0.0/docs/upgrading.rst000066400000000000000000000176421352661744600204160ustar00rootroot00000000000000Upgrading redis-py-cluster ========================== This document describes what must be done when upgrading between different versions to ensure that code still works. 1.3.x --> 2.0.0 --------------- Redis-py upstream package dependency has now been updated to be any of the releases in the major version line 3.0.x. This means that you must upgrade your dependency from 2.10.6 to the latest version. Several internal components have been updated to reflect the code from 3.0.x. Class StrictRedisCluster was renamed to RedisCluster. All usages of this class must be updated. Class StrictRedis has been removed to mirror upstream class structure. Class StrictClusterPipeline was renamed to ClusterPipeline. Method SORT has been changed back to only allow to be executed if keys is in the same slot. No more client side parsing and handling of the keys and values. 1.3.2 --> Next Release ---------------------- If you created the `StrictRedisCluster` (or `RedisCluster`) instance via the `from_url` method and were passing `readonly_mode` to it, the connection pool created will now properly allow selecting read-only slaves from the pool. Previously it always used master nodes only, even in the case of `readonly_mode=True`. Make sure your code don't attempt any write commands over connections with `readonly_mode=True`. 
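A minimal sketch of what to audit is shown below (the URL and key names are examples only):

.. code-block:: python

    from rediscluster import RedisCluster

    # May now be routed to slave nodes when created via from_url
    rc_readonly = RedisCluster.from_url("redis://127.0.0.1:7000", readonly_mode=True)

    # OK: read commands
    rc_readonly.get("foo")

    # Not OK: write commands must go through a client without readonly_mode
    rc = RedisCluster.from_url("redis://127.0.0.1:7000")
    rc.set("foo", "bar")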
1.3.1 --> 1.3.2
---------------

If your redis instance is configured to not have the `CONFIG ...` commands enabled due to security reasons, you need to pass `skip_full_coverage_check=True` into the client object. The benefit is that the client class no longer requires the `CONFIG ...` commands to be enabled on the server. The downside is that you can't use that config option in your redis server and still have the corresponding feature work in this client.

1.3.0 --> 1.3.1
---------------

Method `scan_iter` was rebuilt because it was broken and did not perform as expected. If you are using this method you should be careful with this new implementation and test it thoroughly before using it. The expanded testing for that method indicates it should work without problems. If you find any issues with the new method, please open an issue on github.

A major refactoring was performed in the pipeline system that improved error handling and reliability of execution. It also simplified the code a lot to make it easier to understand and to continue to develop in the future. Because of this major refactoring you should really test your pipeline code thoroughly to ensure that none of your code is broken because of this refactoring.

1.2.0 --> Next release
----------------------

Class RedisClusterMgt has been removed. You should use the `CLUSTER ...` methods that exist in the `StrictRedisCluster` client class.

Method `cluster_delslots` changed its argument specification from `self, node_id, *slots` to `self, *slots` and changed the behaviour of the method to now automatically determine the slot_id based on the current cluster structure and where each slot that you want to delete is located.

Method pfcount no longer has custom logic and exceptions to prevent CROSSSLOT errors. If the method is used with keys in different slots, then a regular CROSSSLOT error (rediscluster.exceptions.ClusterCrossSlotError) will be returned.
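A hedged sketch of the new behaviour (key names are examples; keys sharing the `{tag}` hash tag map to the same slot):

.. code-block:: python

    from rediscluster import RedisCluster
    from rediscluster.exceptions import ClusterCrossSlotError

    rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}], decode_responses=True)

    rc.pfadd("hll{tag}", "a", "b")
    rc.pfadd("hll2{tag}", "c")

    # Same hash tag, same slot: allowed
    rc.pfcount("hll{tag}", "hll2{tag}")

    # Keys in different slots now surface the regular CROSSSLOT error
    try:
        rc.pfcount("hll{tag}", "otherkey")
    except ClusterCrossSlotError:
        pass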
If you are using any pubsub commands you need to test it through thoroughly to ensure that your implementation still works. To use less strict cluster slots discovery you can add the following config to your redis-server config file "cluster-require-full-coverage=no" and this client will honour that setting and not fail if not all slots is covered. A bug was fixed in 'sdiffstore', if you are using this, verify that your code still works as expected. Class RedisClusterMgt is now deprecated and will be removed in next release in favor of all cluster commands implemented in the client in this release. 1.0.0 --> 1.1.0 --------------- The following exceptions have been changed/added and code that use this client might have to be updated to handle the new classes. `raise RedisClusterException("Too many Cluster redirections")` have been changed to `raise ClusterError('TTL exhausted.')` `ClusterDownException` have been replaced with `ClusterDownError` Added new `AskError` exception class. Added new `TryAgainError` exception class. Added new `MovedError` exception class. Added new `ClusterCrossSlotError` exception class. Added optional `max_connections_per_node` parameter to `ClusterConnectionPool` which changes behavior of `max_connections` so that it applies per-node rather than across the whole cluster. The new feature is opt-in, and the existing default behavior is unchanged. Users are recommended to opt-in as the feature fixes two important problems. First is that some nodes could be starved for connections after max_connections is used up by connecting to other nodes. Second is that the asymmetric number of connections across nodes makes it challenging to configure file descriptor and redis max client settings. Reinitialize on `MOVED` errors will not run on every error but instead on every 25 error to avoid excessive cluster reinitialize when used in multiple threads and resharding at the same time. If you want to go back to the old behaviour with reinitialize on every error you should pass in `reinitialize_steps=1` to the client constructor. If you want to increase or decrease the intervall of this new behaviour you should set `reinitialize_steps` in the client constructor to a value that you want. Pipelines in general have recieved alot of attention so if you are using pipelines in your code, ensure that you test the new code out alot before using it to make sure it still works as you expect. The entire client code should now be safer to use in a threaded environment. Some race conditions was found and have now been fixed and it should prevent the code from behaving wierd during reshard operations. 0.2.0 --> 0.3.0 --------------- In `0.3.0` release the name of the client class was changed from `RedisCluster` to `StrictRedisCluster` and a new implementation of `RedisCluster` was added that is based on `redis.Redis` class. This was done to enable implementation a cluster enabled version of `redis.Redis` class. Because of this all imports and usage of `RedisCluster` must be changed to `StrictRedisCluster` so that existing code will remain working. If this is not done some issues could arise in existing code. 0.1.0 --> 0.2.0 --------------- No major changes was done. 
redis-py-cluster-2.0.0/examples/000077500000000000000000000000001352661744600165605ustar00rootroot00000000000000redis-py-cluster-2.0.0/examples/README.md000066400000000000000000000006661352661744600200470ustar00rootroot00000000000000In this folder, you will find some example scripts that will both demonstrate some examples on how to use certain functionality and it also function as test scripts to ensure the client works as expected during usage of those functionalities. To really ensure the scripts is working, they should be runned during a resharding operation to ensure that all redirection and fault handling code works without throwing any unexpected errors. redis-py-cluster-2.0.0/examples/basic.py000066400000000000000000000004261352661744600202150ustar00rootroot00000000000000from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] # Note: decode_responses must be set to True when used with python3 rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) rc.set("foo", "bar") print(rc.get("foo")) redis-py-cluster-2.0.0/examples/basic_password_protected.py000066400000000000000000000004701352661744600242070ustar00rootroot00000000000000from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7100"}] # Note: decode_responses must be set to True when used with python3 rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, password='password_is_protected') rc.set("foo", "bar") print(rc.get("foo")) redis-py-cluster-2.0.0/examples/from_url_password_protected.py000066400000000000000000000003011352661744600247440ustar00rootroot00000000000000from rediscluster import RedisCluster url="redis://:R1NFTBWTE1@10.127.91.90:6572/0" rc = RedisCluster.from_url(url, skip_full_coverage_check=True) rc.set("foo", "bar") print(rc.get("foo")) redis-py-cluster-2.0.0/examples/generate_slot_keys.py000066400000000000000000000013621352661744600230220ustar00rootroot00000000000000import random import string import sys from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] # Note: decode_responses must be set to True when used with python3 rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) # 10 batches batch_set = {i: [] for i in range(0, 16384)} # Do 100000 slot randos in each block for j in range(0, 100000): rando_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) keyslot = rc.connection_pool.nodes.keyslot(rando_string) # batch_set.setdefault(keyslot) batch_set[keyslot].append(rando_string) for i in range(0, 16384): if len(batch_set[i]) > 0: print(i, ':', batch_set[i]) sys.exit(0) redis-py-cluster-2.0.0/examples/incr-test-writer.py000066400000000000000000000004441352661744600223560ustar00rootroot00000000000000from redis._compat import xrange from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": 7000}] r = RedisCluster(startup_nodes=startup_nodes, max_connections=32, decode_responses=True) for i in xrange(1000000): d = str(i) r.set(d, d) r.incrby(d, 1) redis-py-cluster-2.0.0/examples/pipeline-incrby.py000066400000000000000000000012141352661744600222210ustar00rootroot00000000000000from redis._compat import xrange from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": 7000}] r = RedisCluster(startup_nodes=startup_nodes, max_connections=32, decode_responses=True) for i in xrange(1000000): d = str(i) pipe = r.pipeline(transaction=False) pipe.set(d, d) pipe.incrby(d, 1) 
pipe.execute() pipe = r.pipeline(transaction=False) pipe.set("foo-{0}".format(d), d) pipe.incrby("foo-{0}".format(d), 1) pipe.set("bar-{0}".format(d), d) pipe.incrby("bar-{0}".format(d), 1) pipe.set("bazz-{0}".format(d), d) pipe.incrby("bazz-{0}".format(d), 1) pipe.execute() redis-py-cluster-2.0.0/ptp-debug.py000066400000000000000000000005071352661744600172050ustar00rootroot00000000000000from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] # Note: decode_responses must be set to True when used with python3 rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) url_client = RedisCluster.from_url('http://127.0.0.1:7000') __import__('ptpdb').set_trace() redis-py-cluster-2.0.0/rediscluster/000077500000000000000000000000001352661744600174525ustar00rootroot00000000000000redis-py-cluster-2.0.0/rediscluster/__init__.py000066400000000000000000000014141352661744600215630ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib import sys # Import shortcut from .client import RedisCluster from .pipeline import ClusterPipeline from .pubsub import ClusterPubSub # Monkey patch RedisCluster class into redis for easy access import redis setattr(redis, "RedisCluster", RedisCluster) setattr(redis, "ClusterPubSub", ClusterPubSub) setattr(redis, "ClusterPipeline", ClusterPipeline) # Major, Minor, Fix version __version__ = (2, 0, 0) def int_or_str(value): try: return int(value) except ValueError: return value __version__ = '2.0.0' VERSION = tuple(map(int_or_str, __version__.split('.'))) if sys.version_info[0:3] == (3, 4, 0): raise RuntimeError("CRITICAL: rediscluster do not work with python 3.4.0. Please use 3.4.1 or higher.") redis-py-cluster-2.0.0/rediscluster/client.py000066400000000000000000001140671352661744600213130ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals # python std lib import datetime import random import string import time # rediscluster imports from .connection import ( ClusterConnectionPool, ClusterReadOnlyConnectionPool, ClusterWithReadReplicasConnectionPool, SSLClusterConnection, ) from .exceptions import ( RedisClusterException, AskError, MovedError, ClusterDownError, ClusterError, TryAgainError ) from .pubsub import ClusterPubSub from .utils import ( bool_ok, string_keys_to_dict, dict_merge, blocked_command, merge_result, first_key, clusterdown_wrapper, parse_cluster_slots, parse_cluster_nodes, parse_pubsub_channels, parse_pubsub_numsub, parse_pubsub_numpat, ) # 3rd party imports from redis import Redis from redis.client import list_or_args, parse_info from redis.connection import Token from redis._compat import iteritems, basestring, izip, nativestr, long from redis.exceptions import RedisError, ResponseError, TimeoutError, DataError, ConnectionError, BusyLoadingError class RedisCluster(Redis): """ If a command is implemented over the one in Redis then it requires some changes compared to the regular implementation of the method. 
""" RedisClusterRequestTTL = 16 NODES_FLAGS = dict_merge( string_keys_to_dict([ "CLIENT SETNAME", "SENTINEL GET-MASTER-ADDR-BY-NAME", 'SENTINEL MASTER', 'SENTINEL MASTERS', 'SENTINEL MONITOR', 'SENTINEL REMOVE', 'SENTINEL SENTINELS', 'SENTINEL SET', 'SENTINEL SLAVES', 'SHUTDOWN', 'SLAVEOF', 'SCRIPT KILL', 'MOVE', 'BITOP', ], 'blocked'), string_keys_to_dict([ "ECHO", "CONFIG GET", "CONFIG SET", "SLOWLOG GET", "CLIENT KILL", "INFO", "BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", "TIME", "KEYS", "CLUSTER INFO", "PUBSUB CHANNELS", "PUBSUB NUMSUB", "PUBSUB NUMPAT", "CLIENT ID", ], 'all-nodes'), string_keys_to_dict([ "FLUSHALL", "FLUSHDB", "SCRIPT LOAD", "SCRIPT FLUSH", "SCRIPT EXISTS", "SCAN", ], 'all-masters'), string_keys_to_dict([ "RANDOMKEY", "CLUSTER NODES", "CLUSTER SLOTS", ], 'random'), string_keys_to_dict([ "CLUSTER COUNTKEYSINSLOT", "CLUSTER GETKEYSINSLOT", ], 'slot-id'), ) # Not complete, but covers the major ones # https://redis.io/commands READ_COMMANDS = [ "BITPOS", "BITCOUNT", "EXISTS", "GEOHASH", "GEOPOS", "GEODIST", "GEORADIUS", "GEORADIUSBYMEMBER", "GET", "GETBIT", "GETRANGE", "HEXISTS", "HGET", "HGETALL", "HKEYS", "HLEN", "HMGET", "HSTRLEN", "HVALS", "KEYS", "LINDEX", "LLEN", "LRANGE", "MGET", "PTTL", "RANDOMKEY", "SCARD", "SDIFF", "SINTER", "SISMEMBER", "SMEMBERS", "SRANDMEMBER", "STRLEN", "SUNION", "TTL", "ZCARD", "ZCOUNT", "ZRANGE", "ZSCORE" ] RESULT_CALLBACKS = dict_merge( string_keys_to_dict([ "ECHO", "CONFIG GET", "CONFIG SET", "SLOWLOG GET", "CLIENT KILL", "INFO", "BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", "TIME", "SCAN", "CLUSTER INFO", 'CLUSTER ADDSLOTS', 'CLUSTER COUNT-FAILURE-REPORTS', 'CLUSTER DELSLOTS', 'CLUSTER FAILOVER', 'CLUSTER FORGET', "FLUSHALL", "FLUSHDB", "CLIENT ID", ], lambda command, res: res), string_keys_to_dict([ "SCRIPT LOAD", ], lambda command, res: list(res.values()).pop()), string_keys_to_dict([ "SCRIPT EXISTS", ], lambda command, res: [all(k) for k in zip(*res.values())]), string_keys_to_dict([ "SCRIPT FLUSH", ], lambda command, res: all(res.values())), string_keys_to_dict([ "KEYS", ], merge_result), string_keys_to_dict([ "SSCAN", "HSCAN", "ZSCAN", "RANDOMKEY", ], first_key), string_keys_to_dict([ "PUBSUB CHANNELS", ], parse_pubsub_channels), string_keys_to_dict([ "PUBSUB NUMSUB", ], parse_pubsub_numsub), string_keys_to_dict([ "PUBSUB NUMPAT", ], parse_pubsub_numpat), ) CLUSTER_COMMANDS_RESPONSE_CALLBACKS = { 'CLUSTER ADDSLOTS': bool_ok, 'CLUSTER COUNT-FAILURE-REPORTS': int, 'CLUSTER COUNTKEYSINSLOT': int, 'CLUSTER DELSLOTS': bool_ok, 'CLUSTER FAILOVER': bool_ok, 'CLUSTER FORGET': bool_ok, 'CLUSTER GETKEYSINSLOT': list, 'CLUSTER INFO': parse_info, 'CLUSTER KEYSLOT': int, 'CLUSTER MEET': bool_ok, 'CLUSTER NODES': parse_cluster_nodes, 'CLUSTER REPLICATE': bool_ok, 'CLUSTER RESET': bool_ok, 'CLUSTER SAVECONFIG': bool_ok, 'CLUSTER SET-CONFIG-EPOCH': bool_ok, 'CLUSTER SETSLOT': bool_ok, 'CLUSTER SLAVES': parse_cluster_nodes, 'CLUSTER SLOTS': parse_cluster_slots, 'ASKING': bool_ok, 'READONLY': bool_ok, 'READWRITE': bool_ok, } def __init__(self, host=None, port=None, startup_nodes=None, max_connections=None, max_connections_per_node=False, init_slot_cache=True, readonly_mode=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, connection_class=None, 
read_from_replicas=False, **kwargs): """ :startup_nodes: List of nodes that initial bootstrapping can be done from :host: Can be used to point to a startup node :port: Can be used to point to a startup node :max_connections: Maximum number of connections that should be kept open at one time :readonly_mode: enable READONLY mode. You can read possibly stale data from slave. :skip_full_coverage_check: Skips the check of cluster-require-full-coverage config, useful for clusters without the CONFIG command (like aws) :nodemanager_follow_cluster: The node manager will during initialization try the last set of nodes that it was operating on. This will allow the client to drift along side the cluster if the cluster nodes move around alot. :**kwargs: Extra arguments that will be sent into Redis instance when created (See Official redis-py doc for supported kwargs [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py]) Some kwargs is not supported and will raise RedisClusterException - db (Redis do not support database SELECT in cluster mode) """ # Tweaks to Redis client arguments when running in cluster mode if "db" in kwargs: raise RedisClusterException("Argument 'db' is not possible to use in cluster mode") if kwargs.pop('ssl', False): # Needs to be removed to avoid exception in redis Connection init connection_class = SSLClusterConnection if "connection_pool" in kwargs: pool = kwargs.pop('connection_pool') else: startup_nodes = [] if startup_nodes is None else startup_nodes # Support host/port as argument if host: startup_nodes.append({"host": host, "port": port if port else 7000}) if readonly_mode: connection_pool_cls = ClusterReadOnlyConnectionPool elif read_from_replicas: connection_pool_cls = ClusterWithReadReplicasConnectionPool else: connection_pool_cls = ClusterConnectionPool pool = connection_pool_cls( startup_nodes=startup_nodes, init_slot_cache=init_slot_cache, max_connections=max_connections, reinitialize_steps=reinitialize_steps, max_connections_per_node=max_connections_per_node, skip_full_coverage_check=skip_full_coverage_check, nodemanager_follow_cluster=nodemanager_follow_cluster, connection_class=connection_class, **kwargs ) super(RedisCluster, self).__init__(connection_pool=pool, **kwargs) self.refresh_table_asap = False self.nodes_flags = self.__class__.NODES_FLAGS.copy() self.result_callbacks = self.__class__.RESULT_CALLBACKS.copy() self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy() self.response_callbacks = dict_merge(self.response_callbacks, self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) self.read_from_replicas = read_from_replicas @classmethod def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=False, read_from_replicas=False, **kwargs): """ Return a Redis client object configured from the given URL, which must use either `the ``redis://`` scheme `_ for RESP connections or the ``unix://`` scheme for Unix domain sockets. For example:: redis://[:password]@localhost:6379/0 unix://[:password]@/path/to/socket.sock?db=0 There are several ways to specify a database number. The parse function will return the first specified option: 1. A ``db`` querystring option, e.g. redis://localhost?db=0 2. If using the redis:// scheme, the path argument of the url, e.g. redis://localhost/0 3. The ``db`` argument to this function. If none of these options are specified, db=0 is used. Any additional querystring arguments and keyword arguments will be passed along to the ConnectionPool class's initializer. 
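        An illustrative call (the address is an assumption, any reachable
        cluster node works)::

            client = RedisCluster.from_url("redis://127.0.0.1:7000")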
In the case of conflicting arguments, querystring arguments always win. """ if readonly_mode: connection_pool_cls = ClusterReadOnlyConnectionPool elif read_from_replicas: connection_pool_cls = ClusterWithReadReplicasConnectionPool else: connection_pool_cls = ClusterConnectionPool connection_pool = connection_pool_cls.from_url(url, db=db, skip_full_coverage_check=skip_full_coverage_check, **kwargs) return cls(connection_pool=connection_pool, skip_full_coverage_check=skip_full_coverage_check) def __repr__(self): """ """ servers = list({'{0}:{1}'.format(nativestr(info['host']), info['port']) for info in self.connection_pool.nodes.startup_nodes}) servers.sort() return "{0}<{1}>".format(type(self).__name__, ', '.join(servers)) def set_result_callback(self, command, callback): "Set a custom Result Callback" self.result_callbacks[command] = callback def pubsub(self, **kwargs): """ """ return ClusterPubSub(self.connection_pool, **kwargs) def pipeline(self, transaction=None, shard_hint=None): """ Cluster impl: Pipelines do not work in cluster mode the same way they do in normal mode. Create a clone of this object so that simulating pipelines will work correctly. Each command will be called directly when used and when calling execute() will only return the result stack. """ if shard_hint: raise RedisClusterException("shard_hint is deprecated in cluster mode") if transaction: raise RedisClusterException("transaction is deprecated in cluster mode") return ClusterPipeline( connection_pool=self.connection_pool, startup_nodes=self.connection_pool.nodes.startup_nodes, result_callbacks=self.result_callbacks, response_callbacks=self.response_callbacks, ) def transaction(self, *args, **kwargs): """ Transaction is not implemented in cluster mode yet. """ raise RedisClusterException("method RedisCluster.transaction() is not implemented") def _determine_slot(self, *args): """ figure out what slot based on command and args """ if len(args) <= 1: raise RedisClusterException("No way to dispatch this command to Redis Cluster. 
Missing key.") command = args[0] if command in ['EVAL', 'EVALSHA']: numkeys = args[2] keys = args[3: 3 + numkeys] slots = {self.connection_pool.nodes.keyslot(key) for key in keys} if len(slots) != 1: raise RedisClusterException("{0} - all keys must map to the same key slot".format(command)) return slots.pop() key = args[1] # OBJECT command uses a special keyword as first positional argument if command == 'OBJECT': key = args[2] return self.connection_pool.nodes.keyslot(key) def _merge_result(self, command, res, **kwargs): """ `res` is a dict with the following structure Dict(NodeName, CommandResult) """ if command in self.result_callbacks: return self.result_callbacks[command](command, res, **kwargs) # Default way to handle result return first_key(command, res) def determine_node(self, *args, **kwargs): """ """ command = args[0] node_flag = self.nodes_flags.get(command) if node_flag == 'blocked': return blocked_command(self, command) elif node_flag == 'random': return [self.connection_pool.nodes.random_node()] elif node_flag == 'all-masters': return self.connection_pool.nodes.all_masters() elif node_flag == 'all-nodes': return self.connection_pool.nodes.all_nodes() elif node_flag == 'slot-id': return [self.connection_pool.nodes.node_from_slot(args[1])] else: return None @clusterdown_wrapper def execute_command(self, *args, **kwargs): """ Send a command to a node in the cluster """ if not args: raise RedisClusterException("Unable to determine command to use") command = args[0] # If set externally we must update it before calling any commands if self.refresh_table_asap: self.connection_pool.nodes.initialize() self.refresh_table_asap = False node = self.determine_node(*args, **kwargs) if node: return self._execute_command_on_nodes(node, *args, **kwargs) redirect_addr = None asking = False is_read_replica = False try_random_node = False slot = self._determine_slot(*args) ttl = int(self.RedisClusterRequestTTL) while ttl > 0: ttl -= 1 if asking: node = self.connection_pool.nodes.nodes[redirect_addr] r = self.connection_pool.get_connection_by_node(node) elif try_random_node: r = self.connection_pool.get_random_connection() try_random_node = False else: if self.refresh_table_asap: # MOVED node = self.connection_pool.get_master_node_by_slot(slot) else: node = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and (command in self.READ_COMMANDS)) is_read_replica = node['server_type'] == 'slave' r = self.connection_pool.get_connection_by_node(node) try: if asking: r.send_command('ASKING') self.parse_response(r, "ASKING", **kwargs) asking = False if is_read_replica: # Ask read replica to accept reads (see https://redis.io/commands/readonly) # TODO: do we need to handle errors from this response? r.send_command('READONLY') self.parse_response(r, 'READONLY', **kwargs) is_read_replica = False r.send_command(*args) return self.parse_response(r, command, **kwargs) except (RedisClusterException, BusyLoadingError): raise except (ConnectionError, TimeoutError): try_random_node = True if ttl < self.RedisClusterRequestTTL / 2: time.sleep(0.1) except ClusterDownError as e: self.connection_pool.disconnect() self.connection_pool.reset() self.refresh_table_asap = True raise e except MovedError as e: # Reinitialize on ever x number of MovedError. # This counter will increase faster when the same client object # is shared between multiple threads. To reduce the frequency you # can set the variable 'reinitialize_steps' in the constructor. 
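                # Illustrative only (not part of the original flow): a client created as
                #     RedisCluster(startup_nodes=startup_nodes, reinitialize_steps=100)
                # is intended to rebuild the slots cache less often than the default of 25.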
self.refresh_table_asap = True self.connection_pool.nodes.increment_reinitialize_counter() node = self.connection_pool.nodes.set_node(e.host, e.port, server_type='master') self.connection_pool.nodes.slots[e.slot_id][0] = node except TryAgainError as e: if ttl < self.RedisClusterRequestTTL / 2: time.sleep(0.05) except AskError as e: redirect_addr, asking = "{0}:{1}".format(e.host, e.port), True finally: self.connection_pool.release(r) raise ClusterError('TTL exhausted.') def _execute_command_on_nodes(self, nodes, *args, **kwargs): """ """ command = args[0] res = {} for node in nodes: connection = self.connection_pool.get_connection_by_node(node) # copy from redis-py try: connection.send_command(*args) res[node["name"]] = self.parse_response(connection, command, **kwargs) except (ConnectionError, TimeoutError) as e: connection.disconnect() if not connection.retry_on_timeout and isinstance(e, TimeoutError): raise connection.send_command(*args) res[node["name"]] = self.parse_response(connection, command, **kwargs) except ClusterDownError as e: self.connection_pool.disconnect() self.connection_pool.reset() self.refresh_table_asap = True raise finally: self.connection_pool.release(connection) return self._merge_result(command, res, **kwargs) ########## # Cluster management commands def _nodes_slots_to_slots_nodes(self, mapping): """ Converts a mapping of {id: , slots: (slot1, slot2)} to {slot1: , slot2: } Operation is expensive so use with caution """ out = {} for node in mapping: for slot in node['slots']: out[str(slot)] = node['id'] return out def cluster_addslots(self, node_id, *slots): """ Assign new hash slots to receiving node Sends to specefied node """ return self.execute_command('CLUSTER ADDSLOTS', *slots, node_id=node_id) def cluster_countkeysinslot(self, slot_id): """ Return the number of local keys in the specified hash slot Send to node based on specefied slot_id """ return self.execute_command('CLUSTER COUNTKEYSINSLOT', slot_id) def cluster_count_failure_report(self, node_id): """ Return the number of failure reports active for a given node Sends to specefied node """ return self.execute_command('CLUSTER COUNT-FAILURE-REPORTS', node_id=node_id) def cluster_delslots(self, *slots): """ Set hash slots as unbound in the cluster. It determines by it self what node the slot is in and sends it there Returns a list of the results for each processed slot. """ cluster_nodes = self._nodes_slots_to_slots_nodes(self.cluster_nodes()) return [ self.execute_command('CLUSTER DELSLOTS', slot, node_id=cluster_nodes[slot]) for slot in slots ] def cluster_failover(self, node_id, option): """ Forces a slave to perform a manual failover of its master Sends to specefied node """ assert option.upper() in ('FORCE', 'TAKEOVER') # TODO: change this option handling return self.execute_command('CLUSTER FAILOVER', Token(option)) def cluster_info(self): """ Provides info about Redis Cluster node state Sends to random node in the cluster """ return self.execute_command('CLUSTER INFO') def cluster_keyslot(self, name): """ Returns the hash slot of the specified key Sends to random node in the cluster """ return self.execute_command('CLUSTER KEYSLOT', name) def cluster_meet(self, node_id, host, port): """ Force a node cluster to handshake with another node. 
Sends to specefied node """ return self.execute_command('CLUSTER MEET', host, port, node_id=node_id) def cluster_nodes(self): """ Force a node cluster to handshake with another node Sends to random node in the cluster """ return self.execute_command('CLUSTER NODES') def cluster_replicate(self, target_node_id): """ Reconfigure a node as a slave of the specified master node Sends to specefied node """ return self.execute_command('CLUSTER REPLICATE', target_node_id) def cluster_reset(self, node_id, soft=True): """ Reset a Redis Cluster node If 'soft' is True then it will send 'SOFT' argument If 'soft' is False then it will send 'HARD' argument Sends to specefied node """ return self.execute_command('CLUSTER RESET', Token('SOFT' if soft else 'HARD'), node_id=node_id) def cluster_reset_all_nodes(self, soft=True): """ Send CLUSTER RESET to all nodes in the cluster If 'soft' is True then it will send 'SOFT' argument If 'soft' is False then it will send 'HARD' argument Sends to all nodes in the cluster """ return [ self.execute_command( 'CLUSTER RESET', Token('SOFT' if soft else 'HARD'), node_id=node['id'], ) for node in self.cluster_nodes() ] def cluster_save_config(self): """ Forces the node to save cluster state on disk Sends to all nodes in the cluster """ return self.execute_command('CLUSTER SAVECONFIG') def cluster_get_keys_in_slot(self, slot, num_keys): """ Returns the number of keys in the specefied cluster slot """ return self.execute_command('CLUSTER GETKEYSINSLOT', slot, num_keys) def cluster_set_config_epoch(self, node_id, epoch): """ Set the configuration epoch in a new node Sends to specefied node """ return self.execute_command('CLUSTER SET-CONFIG-EPOCH', epoch, node_id=node_id) # TODO: Determine what the purpose of bind_to_node_ip is going to be def cluster_setslot(self, node_id, slot_id, state, bind_to_node_id=None): """ Bind an hash slot to a specific node Sends to specefied node """ if state.upper() in ('IMPORTING', 'MIGRATING', 'NODE') and node_id is not None: return self.execute_command('CLUSTER SETSLOT', slot_id, Token(state), node_id) elif state.upper() == 'STABLE': return self.execute_command('CLUSTER SETSLOT', slot_id, Token('STABLE')) else: raise RedisError('Invalid slot state: {0}'.format(state)) def cluster_slaves(self, target_node_id): """ Force a node cluster to handshake with another node Sends to targeted cluster node """ return self.execute_command('CLUSTER SLAVES', target_node_id) def cluster_slots(self): """ Get array of Cluster slot to node mappings Sends to random node in the cluster """ return self.execute_command('CLUSTER SLOTS') ########## # All methods that must have custom implementation def _parse_scan(self, response, **options): """ Borrowed from redis-py::client.py """ cursor, r = response return long(cursor), r def scan_iter(self, match=None, count=None): """ Make an iterator using the SCAN command so that the client doesn't need to remember the cursor position. ``match`` allows for filtering the keys by pattern ``count`` allows for hint the minimum number of returns Cluster impl: Result from SCAN is different in cluster mode. 
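        A SCAN is issued against every known master and the per node cursors are
        tracked separately, so the iterator yields keys from the whole cluster.
        Illustrative usage (the pattern is an assumption):

            for key in rc.scan_iter(match="user:*", count=100):
                print(key)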
""" cursors = {} nodeData = {} for master_node in self.connection_pool.nodes.all_masters(): cursors[master_node["name"]] = "0" nodeData[master_node["name"]] = master_node while not all(cursors[node] == 0 for node in cursors): for node in cursors: if cursors[node] == 0: continue conn = self.connection_pool.get_connection_by_node(nodeData[node]) pieces = ['SCAN', cursors[node]] if match is not None: pieces.extend([Token('MATCH'), match]) if count is not None: pieces.extend([Token('COUNT'), count]) conn.send_command(*pieces) raw_resp = conn.read_response() # if you don't release the connection, the driver will make another, and you will hate your life self.connection_pool.release(conn) cur, resp = self._parse_scan(raw_resp) cursors[node] = cur for r in resp: yield r def mget(self, keys, *args): """ Returns a list of values ordered identically to ``keys`` Cluster impl: Itterate all keys and send GET for each key. This will go alot slower than a normal mget call in Redis. Operation is no longer atomic. """ return [self.get(arg) for arg in list_or_args(keys, args)] def mset(self, *args, **kwargs): """ Sets key/values based on a mapping. Mapping can be supplied as a single dictionary argument or as kwargs. Cluster impl: Itterate over all items and do SET on each (k,v) pair Operation is no longer atomic. """ if args: if len(args) != 1 or not isinstance(args[0], dict): raise RedisError('MSET requires **kwargs or a single dict arg') kwargs.update(args[0]) for pair in iteritems(kwargs): self.set(pair[0], pair[1]) return True def msetnx(self, *args, **kwargs): """ Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful. Clutser impl: Itterate over all items and do GET to determine if all keys do not exists. If true then call mset() on all keys. """ if args: if len(args) != 1 or not isinstance(args[0], dict): raise RedisError('MSETNX requires **kwargs or a single dict arg') kwargs.update(args[0]) # Itterate over all items and fail fast if one value is True. for k, _ in kwargs.items(): if self.get(k): return False return self.mset(**kwargs) def rename(self, src, dst, replace=False): """ Rename key ``src`` to ``dst`` Cluster impl: If the src and dsst keys is in the same slot then send a plain RENAME command to that node to do the rename inside the server. If the keys is in crossslots then use the client side implementation as fallback method. In this case this operation is no longer atomic as the key is dumped and posted back to the server through the client. """ if src == dst: raise ResponseError("source and destination objects are the same") # # Optimization where if both keys is in the same slot then we can use the # plain upstream rename method. # src_slot = self.connection_pool.nodes.keyslot(src) dst_slot = self.connection_pool.nodes.keyslot(dst) if src_slot == dst_slot: return self.execute_command('RENAME', src, dst) # # To provide cross slot support we implement rename by doing the internal command # redis server runs but in the client instead. # data = self.dump(src) if data is None: raise ResponseError("no such key") ttl = self.pttl(src) if ttl is None or ttl < 1: ttl = 0 self.delete(dst) self.restore(dst, ttl, data, replace) self.delete(src) return True def delete(self, *names): """ "Delete one or more keys specified by ``names``" Cluster impl: Iterate all keys and send DELETE for each key. 
This will go a lot slower than a normal delete call in Redis. Operation is no longer atomic. """ count = 0 for arg in names: count += self.execute_command('DEL', arg) return count def renamenx(self, src, dst): """ Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist Cluster impl: Check if dst key do not exists, then calls rename(). Operation is no longer atomic. """ if not self.exists(dst): return self.rename(src, dst) return False def pubsub_channels(self, pattern='*', aggregate=True): """ Return a list of channels that have at least one subscriber. Aggregate toggles merging of response. """ return self.execute_command('PUBSUB CHANNELS', pattern, aggregate=aggregate) def pubsub_numpat(self, aggregate=True): """ Returns the number of subscriptions to patterns. Aggregate toggles merging of response. """ return self.execute_command('PUBSUB NUMPAT', aggregate=aggregate) def pubsub_numsub(self, *args, **kwargs): """ Return a list of (channel, number of subscribers) tuples for each channel given in ``*args``. ``aggregate`` keyword argument toggles merging of response. """ options = {'aggregate': kwargs.get('aggregate', True)} return self.execute_command('PUBSUB NUMSUB', *args, **options) #### # List commands def brpoplpush(self, src, dst, timeout=0): """ Pop a value off the tail of ``src``, push it on the head of ``dst`` and then return it. This command blocks until a value is in ``src`` or until ``timeout`` seconds elapse, whichever is first. A ``timeout`` value of 0 blocks forever. Cluster impl: Call brpop() then send the result into lpush() Operation is no longer atomic. """ try: value = self.brpop(src, timeout=timeout) if value is None: return None except TimeoutError: # Timeout was reached return None self.lpush(dst, value[1]) return value[1] def rpoplpush(self, src, dst): """ RPOP a value off of the ``src`` list and atomically LPUSH it on to the ``dst`` list. Returns the value. Cluster impl: Call rpop() then send the result into lpush() Operation is no longer atomic. """ value = self.rpop(src) if value: self.lpush(dst, value) return value return None ### # Set commands def sdiff(self, keys, *args): """ Return the difference of sets specified by ``keys`` Cluster impl: Querry all keys and diff all sets and return result """ k = list_or_args(keys, args) res = self.smembers(k[0]) for arg in k[1:]: res = res - self.smembers(arg) return res def sdiffstore(self, dest, keys, *args): """ Store the difference of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. Overwrites dest key if it exists. Cluster impl: Use sdiff() --> Delete dest key --> store result in dest key """ res = self.sdiff(keys, *args) self.delete(dest) if not res: return 0 return self.sadd(dest, *res) def sinter(self, keys, *args): """ Return the intersection of sets specified by ``keys`` Cluster impl: Querry all keys, intersection and return result """ k = list_or_args(keys, args) res = self.smembers(k[0]) for arg in k[1:]: res = res & self.smembers(arg) return res def sinterstore(self, dest, keys, *args): """ Store the intersection of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. Cluster impl: Use sinter() --> Delete dest key --> store result in dest key """ res = self.sinter(keys, *args) self.delete(dest) if res: self.sadd(dest, *res) return len(res) else: return 0 def smove(self, src, dst, value): """ Move ``value`` from set ``src`` to set ``dst`` atomically Cluster impl: SMEMBERS --> SREM --> SADD. 
Function is no longer atomic. """ res = self.srem(src, value) # Only add the element if existed in src set if res == 1: self.sadd(dst, value) return res def sunion(self, keys, *args): """ Return the union of sets specified by ``keys`` Cluster impl: Querry all keys, union and return result Operation is no longer atomic. """ k = list_or_args(keys, args) res = self.smembers(k[0]) for arg in k[1:]: res = res | self.smembers(arg) return res def sunionstore(self, dest, keys, *args): """ Store the union of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. Cluster impl: Use sunion() --> Dlete dest key --> store result in dest key Operation is no longer atomic. """ res = self.sunion(keys, *args) self.delete(dest) return self.sadd(dest, *res) def pfcount(self, *sources): """ pfcount only works when all sources point to the same hash slot. """ return super(self.__class__, self).pfcount(*sources) def pfmerge(self, dest, *sources): """ Merge N different HyperLogLogs into a single one. Cluster impl: Very special implementation is required to make pfmerge() work But it works :] It works by first fetching all HLL objects that should be merged and move them to one hashslot so that pfmerge operation can be performed without any 'CROSSSLOT' error. After the PFMERGE operation is done then it will be moved to the correct location within the cluster and cleanup is done. This operation is no longer atomic because of all the operations that has to be done. """ all_k = [] # Fetch all HLL objects via GET and store them client side as strings all_hll_objects = [self.get(hll_key) for hll_key in sources] # Randomize a keyslot hash that should be used inside {} when doing SET random_hash_slot = self._random_id() # Special handling of dest variable if it allready exists, then it shold be included in the HLL merge # dest can exists anywhere in the cluster. dest_data = self.get(dest) if dest_data: all_hll_objects.append(dest_data) # SET all stored HLL objects with SET {RandomHash}RandomKey hll_obj for hll_object in all_hll_objects: k = self._random_good_hashslot_key(random_hash_slot) all_k.append(k) self.set(k, hll_object) # Do regular PFMERGE operation and store value in random key in {RandomHash} tmp_dest = self._random_good_hashslot_key(random_hash_slot) self.execute_command("PFMERGE", tmp_dest, *all_k) # Do GET and SET so that result will be stored in the destination object any where in the cluster parsed_dest = self.get(tmp_dest) self.set(dest, parsed_dest) # Cleanup tmp variables self.delete(tmp_dest) for k in all_k: self.delete(k) return True def _random_good_hashslot_key(self, hashslot): """ Generate a good random key with a low probability of collision between any other key. """ # TODO: Check if the key exists or not. continue to randomize until a empty key is found random_id = "{{0}}{1}".format(hashslot, self._random_id()) return random_id def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits): """ Generates a random id based on `size` and `chars` variable. By default it will generate a 16 character long string based on ascii uppercase letters and digits. 
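        Together with _random_good_hashslot_key() this is what pfmerge() uses to
        build its temporary keys; the intent is to produce keys of the form
        '{<hashslot>}<random id>' (illustrative) so that all temporary keys map
        to the same cluster slot.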
""" return ''.join(random.choice(chars) for _ in range(size)) from rediscluster.pipeline import ClusterPipeline redis-py-cluster-2.0.0/rediscluster/connection.py000066400000000000000000000354761352661744600222020ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib from __future__ import unicode_literals import os import random import threading from contextlib import contextmanager from itertools import chain # rediscluster imports from .nodemanager import NodeManager from .exceptions import ( RedisClusterException, AskError, MovedError, TryAgainError, ClusterDownError, ClusterCrossSlotError, MasterDownError, ) # 3rd party imports from redis._compat import nativestr from redis.client import dict_merge from redis.connection import ConnectionPool, Connection, DefaultParser, SSLConnection from redis.exceptions import ConnectionError class ClusterParser(DefaultParser): """ """ EXCEPTION_CLASSES = dict_merge( DefaultParser.EXCEPTION_CLASSES, { 'ASK': AskError, 'TRYAGAIN': TryAgainError, 'MOVED': MovedError, 'CLUSTERDOWN': ClusterDownError, 'CROSSSLOT': ClusterCrossSlotError, 'MASTERDOWN': MasterDownError, }) class ClusterConnection(Connection): "Manages TCP communication to and from a Redis server" description_format = "ClusterConnection" def __init__(self, *args, **kwargs): self.readonly = kwargs.pop('readonly', False) kwargs['parser_class'] = ClusterParser super(ClusterConnection, self).__init__(*args, **kwargs) def on_connect(self): ''' Initialize the connection, authenticate and select a database and send READONLY if it is set during object initialization. ''' super(ClusterConnection, self).on_connect() if self.readonly: self.send_command('READONLY') if nativestr(self.read_response()) != 'OK': raise ConnectionError('READONLY command failed') class SSLClusterConnection(SSLConnection): """ Manages TCP communication over TLS/SSL to and from a Redis cluster Usage: pool = ClusterConnectionPool(connection_class=SSLClusterConnection, ...) client = RedisCluster(connection_pool=pool) """ description_format = "SSLClusterConnection" def __init__(self, **kwargs): self.readonly = kwargs.pop('readonly', False) kwargs['parser_class'] = ClusterParser super(SSLClusterConnection, self).__init__(**kwargs) def on_connect(self): ''' Initialize the connection, authenticate and select a database and send READONLY if it is set during object initialization. ''' super(SSLClusterConnection, self).on_connect() if self.readonly: self.send_command('READONLY') if nativestr(self.read_response()) != 'OK': raise ConnectionError('READONLY command failed') class UnixDomainSocketConnection(Connection): """ """ description_format = "ClusterUnixDomainSocketConnection" class ClusterConnectionPool(ConnectionPool): """ Custom connection pool for rediscluster """ RedisClusterDefaultTimeout = None def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=None, max_connections=None, max_connections_per_node=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, **connection_kwargs): """ :skip_full_coverage_check: Skips the check of cluster-require-full-coverage config, useful for clusters without the CONFIG command (like aws) :nodemanager_follow_cluster: The node manager will during initialization try the last set of nodes that it was operating on. This will allow the client to drift along side the cluster if the cluster nodes move around alot. 
""" if connection_class is None: connection_class = ClusterConnection super(ClusterConnectionPool, self).__init__(connection_class=connection_class, max_connections=max_connections) # Special case to make from_url method compliant with cluster setting. # from_url method will send in the ip and port through a different variable then the # regular startup_nodes variable. if startup_nodes is None: if 'port' in connection_kwargs and 'host' in connection_kwargs: startup_nodes = [{ 'host': connection_kwargs.pop('host'), 'port': str(connection_kwargs.pop('port')), }] self.max_connections = max_connections or 2 ** 31 self.max_connections_per_node = max_connections_per_node if connection_class == SSLClusterConnection: connection_kwargs['ssl'] = True # needed in Redis init self.nodes = NodeManager( startup_nodes, reinitialize_steps=reinitialize_steps, skip_full_coverage_check=skip_full_coverage_check, max_connections=self.max_connections, nodemanager_follow_cluster=nodemanager_follow_cluster, **connection_kwargs ) if init_slot_cache: self.nodes.initialize() self.connections = {} self.connection_kwargs = connection_kwargs self.reset() if "socket_timeout" not in self.connection_kwargs: self.connection_kwargs["socket_timeout"] = ClusterConnectionPool.RedisClusterDefaultTimeout def __repr__(self): """ Return a string with all unique ip:port combinations that this pool is connected to. """ nodes = [{'host': i['host'], 'port': i['port']} for i in self.nodes.startup_nodes] return "{0}<{1}>".format( type(self).__name__, ", ".join([self.connection_class.description_format % dict(node, **self.connection_kwargs) for node in nodes]) ) def reset(self): """ Resets the connection pool back to a clean state. """ self.pid = os.getpid() self._created_connections = 0 self._created_connections_per_node = {} # Dict(Node, Int) self._available_connections = {} # Dict(Node, List) self._in_use_connections = {} # Dict(Node, Set) self._check_lock = threading.Lock() def _checkpid(self): """ """ if self.pid != os.getpid(): with self._check_lock: if self.pid == os.getpid(): # another thread already did the work while we waited # on the lockself. return self.disconnect() self.reset() def get_connection(self, command_name, *keys, **options): """ # TODO: Default method entrypoint. Keys, options is not in use by any of the standard code. 
""" # Only pubsub command/connection should be allowed here if command_name != "pubsub": raise RedisClusterException("Only 'pubsub' commands can be used by get_connection()") channel = options.pop('channel', None) if not channel: return self.get_random_connection() slot = self.nodes.keyslot(channel) node = self.get_master_node_by_slot(slot) self._checkpid() try: connection = self._available_connections.get(node["name"], []).pop() except IndexError: connection = self.make_connection(node) if node['name'] not in self._in_use_connections: self._in_use_connections[node['name']] = set() self._in_use_connections[node['name']].add(connection) return connection def make_connection(self, node): """ Create a new connection """ if self.count_all_num_connections(node) >= self.max_connections: if self.max_connections_per_node: raise RedisClusterException("Too many connection ({0}) for node: {1}".format(self.count_all_num_connections(node), node['name'])) raise RedisClusterException("Too many connections") self._created_connections_per_node.setdefault(node['name'], 0) self._created_connections_per_node[node['name']] += 1 connection = self.connection_class(host=node["host"], port=node["port"], **self.connection_kwargs) # Must store node in the connection to make it eaiser to track connection.node = node return connection def release(self, connection): """ Releases the connection back to the pool """ self._checkpid() if connection.pid != self.pid: return # Remove the current connection from _in_use_connection and add it back to the available pool # There is cases where the connection is to be removed but it will not exist and there # must be a safe way to remove i_c = self._in_use_connections.get(connection.node["name"], set()) if connection in i_c: i_c.remove(connection) else: pass # TODO: Log.warning("Tried to release connection that did not exist any longer : {0}".format(connection)) self._available_connections.setdefault(connection.node["name"], []).append(connection) def disconnect(self): """ Nothing that requires any overwrite. """ all_conns = chain( self._available_connections.values(), self._in_use_connections.values(), ) for node_connections in all_conns: for connection in node_connections: connection.disconnect() def count_all_num_connections(self, node): """ """ if self.max_connections_per_node: return self._created_connections_per_node.get(node['name'], 0) return sum([i for i in self._created_connections_per_node.values()]) def get_random_connection(self): """ Open new connection to random redis server. """ # TODO: Should this open a new random connection or shuld it look if there is any # open available connections and return that instead? 
for node in self.nodes.random_startup_node_ittr(): connection = self.get_connection_by_node(node) if connection: return connection raise Exception("Cant reach a single startup node.") def get_connection_by_key(self, key, command): """ """ if not key: raise RedisClusterException("No way to dispatch this command to Redis Cluster.") return self.get_connection_by_slot(self.nodes.keyslot(key)) def get_connection_by_slot(self, slot): """ Determine what server a specific slot belongs to and return a redis object that is connected """ self._checkpid() try: return self.get_connection_by_node(self.get_node_by_slot(slot)) except (KeyError, RedisClusterException) as exc: return self.get_random_connection() def get_connection_by_node(self, node): """ get a connection by node """ self._checkpid() self.nodes.set_node_name(node) try: # Try to get connection from existing pool connection = self._available_connections.get(node["name"], []).pop() except IndexError: connection = self.make_connection(node) self._in_use_connections.setdefault(node["name"], set()).add(connection) return connection def get_master_node_by_slot(self, slot): """ """ try: return self.nodes.slots[slot][0] except KeyError as ke: raise RedisClusterException('Slot "{slot}" not covered by the cluster. "skip_full_coverage_check={skip_full_coverage_check}"'.format( slot=slot, skip_full_coverage_check=self.nodes._skip_full_coverage_check, )) def get_node_by_slot(self, slot, *args, **kwargs): """ """ return self.get_master_node_by_slot(slot) class ClusterReadOnlyConnectionPool(ClusterConnectionPool): """ Readonly connection pool for rediscluster """ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=None, max_connections=None, nodemanager_follow_cluster=False, **connection_kwargs): """ """ if connection_class is None: connection_class = ClusterConnection super(ClusterReadOnlyConnectionPool, self).__init__( startup_nodes=startup_nodes, init_slot_cache=init_slot_cache, connection_class=connection_class, max_connections=max_connections, readonly=True, nodemanager_follow_cluster=nodemanager_follow_cluster, **connection_kwargs) self.master_node_commands = ('SCAN', 'SSCAN', 'HSCAN', 'ZSCAN') def get_connection_by_key(self, key, command): """ """ if not key: raise RedisClusterException("No way to dispatch this command to Redis Cluster.") if command in self.master_node_commands: return self.get_master_connection_by_slot(self.nodes.keyslot(key)) else: return self.get_random_master_slave_connection_by_slot(self.nodes.keyslot(key)) def get_master_connection_by_slot(self, slot): """ Returns a connection for the Master node for the specefied slot. Do not return a random node if master node is not available for any reason. """ self._checkpid() return self.get_connection_by_node(self.get_node_by_slot(slot)) def get_random_master_slave_connection_by_slot(self, slot): """ Returns a random connection from the set of (master + slaves) for the specefied slot. If connection is not reachable then return a random connection. """ self._checkpid() try: return self.get_node_by_slot_random(self.get_node_by_slot(slot)) except KeyError: return self.get_random_connection() def get_node_by_slot_random(self, slot): """ Return a random node for the specified slot. 
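        Used by get_random_master_slave_connection_by_slot() so that reads can be
        spread across the master and any replicas serving the slot.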
""" return random.choice(self.nodes.slots[slot]) class ClusterWithReadReplicasConnectionPool(ClusterConnectionPool): """ Custom connection pool for rediscluster with load balancing across read replicas """ def get_node_by_slot(self, slot, read_command=False): """ Get a random node from the slot, including master """ nodes_in_slot = self.nodes.slots[slot] if read_command: random_index = random.randrange(0, len(nodes_in_slot)) return nodes_in_slot[random_index] else: return nodes_in_slot[0] @contextmanager def by_node_context(pool, node): """ Get a connection from the pool and automatically release it back """ connection = pool.get_connection_by_node(node) yield connection pool.release(connection) redis-py-cluster-2.0.0/rediscluster/crc.py000066400000000000000000000052421352661744600205760ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib import sys x_mode_m_crc16_lookup = [ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 ] def _crc16_py3(data): """ """ crc = 0 for byte in data: crc = ((crc << 8) & 0xff00) ^ x_mode_m_crc16_lookup[((crc >> 8) & 0xff) ^ byte] return crc & 0xffff def _crc16_py2(data): """ """ crc = 0 for byte in data: crc = ((crc << 8) & 0xff00) ^ x_mode_m_crc16_lookup[((crc >> 8) & 0xff) ^ ord(byte)] return crc & 0xffff if sys.version_info >= (3, 0, 0): crc16 = _crc16_py3 else: crc16 = _crc16_py2 redis-py-cluster-2.0.0/rediscluster/exceptions.py000066400000000000000000000026731352661744600222150ustar00rootroot00000000000000# -*- coding: utf-8 -*- from redis.exceptions import ( ResponseError, 
RedisError, ) class RedisClusterException(Exception): """ """ pass class RedisClusterError(Exception): """ """ pass class ClusterDownException(Exception): """ """ pass class ClusterError(RedisError): """ """ pass class ClusterCrossSlotError(ResponseError): """ """ message = "Keys in request don't hash to the same slot" class ClusterDownError(ClusterError, ResponseError): """ """ def __init__(self, resp): self.args = (resp, ) self.message = resp class AskError(ResponseError): """ src node: MIGRATING to dst node get > ASK error ask dst node > ASKING command dst node: IMPORTING from src node asking command only affects next command any op will be allowed after asking command """ def __init__(self, resp): """should only redirect to master node""" self.args = (resp, ) self.message = resp slot_id, new_node = resp.split(' ') host, port = new_node.rsplit(':', 1) self.slot_id = int(slot_id) self.node_addr = self.host, self.port = host, int(port) class TryAgainError(ResponseError): """ """ def __init__(self, *args, **kwargs): pass class MovedError(AskError): """ """ pass class MasterDownError(ClusterDownError): """ """ pass redis-py-cluster-2.0.0/rediscluster/nodemanager.py000066400000000000000000000270111352661744600223050ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib import random # rediscluster imports from .crc import crc16 from .exceptions import RedisClusterException # 3rd party imports from redis import Redis from redis._compat import unicode, bytes, long, basestring from redis.connection import Encoder from redis import ConnectionError, TimeoutError, ResponseError class NodeManager(object): """ """ RedisClusterHashSlots = 16384 def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, **connection_kwargs): """ :skip_full_coverage_check: Skips the check of cluster-require-full-coverage config, useful for clusters without the CONFIG command (like aws) :nodemanager_follow_cluster: The node manager will during initialization try the last set of nodes that it was operating on. This will allow the client to drift along side the cluster if the cluster nodes move around alot. """ self.connection_kwargs = connection_kwargs self.nodes = {} self.slots = {} self.startup_nodes = [] if startup_nodes is None else startup_nodes self.orig_startup_nodes = [node for node in self.startup_nodes] self.reinitialize_counter = 0 self.reinitialize_steps = reinitialize_steps or 25 self._skip_full_coverage_check = skip_full_coverage_check self.nodemanager_follow_cluster = nodemanager_follow_cluster self.encoder = Encoder( connection_kwargs.get('encoding', 'utf-8'), connection_kwargs.get('encoding_errors', 'strict'), connection_kwargs.get('decode_responses', False) ) if not self.startup_nodes: raise RedisClusterException("No startup nodes provided") def keyslot(self, key): """ Calculate keyslot for a given key. 
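        The slot is crc16(key) % 16384, and if the key contains a non-empty
        '{...}' hash tag only the text inside the first tag is hashed, so keys
        that share a tag are guaranteed to live in the same slot. Illustrative
        example (key names are assumptions):

            nm = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}])
            nm.keyslot("{user:1000}.following") == nm.keyslot("{user:1000}.followers")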
Tuned for compatibility with python 2.7.x """ k = self.encoder.encode(key) start = k.find(b"{") if start > -1: end = k.find(b"}", start + 1) if end > -1 and end != start + 1: k = k[start + 1:end] return crc16(k) % self.RedisClusterHashSlots def node_from_slot(self, slot): """ """ for node in self.slots[slot]: if node['server_type'] == 'master': return node def all_nodes(self): """ """ for node in self.nodes.values(): yield node def all_masters(self): """ """ for node in self.nodes.values(): if node["server_type"] == "master": yield node def random_startup_node(self): """ """ random.shuffle(self.startup_nodes) return self.startup_nodes[0] def random_startup_node_ittr(self): """ Generator that will return a random startup nodes. Works as a generator. """ while True: yield random.choice(self.startup_nodes) def random_node(self): """ """ key = random.choice(list(self.nodes.keys())) return self.nodes[key] def get_redis_link(self, host, port, decode_responses=False): """ """ allowed_keys = ( 'host', 'port', 'db', 'password', 'socket_timeout', 'socket_connect_timeout', 'socket_keepalive', 'socket_keepalive_options', 'connection_pool', 'unix_socket_path', 'encoding', 'encoding_errors', 'charset', 'errors', 'decode_responses', 'retry_on_timeout', 'ssl', 'ssl_keyfile', 'ssl_certfile', 'ssl_cert_reqs', 'ssl_ca_certs', 'max_connections', ) disabled_keys = ( 'host', 'port', 'decode_responses', ) connection_kwargs = {k: v for k, v in self.connection_kwargs.items() if k in set(allowed_keys) - set(disabled_keys)} return Redis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs) def initialize(self): """ Init the slots cache by asking all startup nodes what the current cluster configuration is """ nodes_cache = {} tmp_slots = {} all_slots_covered = False disagreements = [] startup_nodes_reachable = False nodes = self.orig_startup_nodes # With this option the client will attempt to connect to any of the previous set of nodes instead of the original set of nodes if self.nodemanager_follow_cluster: nodes = self.startup_nodes for node in nodes: try: r = self.get_redis_link(host=node["host"], port=node["port"], decode_responses=True) cluster_slots = r.execute_command("cluster", "slots") startup_nodes_reachable = True except (ConnectionError, TimeoutError): continue except ResponseError as e: # Isn't a cluster connection, so it won't parse these exceptions automatically message = e.__str__() if 'CLUSTERDOWN' in message or 'MASTERDOWN' in message: continue else: raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node)) except Exception: raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node)) all_slots_covered = True # If there's only one server in the cluster, its ``host`` is '' # Fix it to the host in startup_nodes if (len(cluster_slots) == 1 and len(cluster_slots[0][2][0]) == 0 and len(self.startup_nodes) == 1): cluster_slots[0][2][0] = self.startup_nodes[0]['host'] # No need to decode response because Redis should handle that for us... 
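            # Illustrative shape of a single 'cluster slots' entry as the loop below
            # expects it (values are examples, not taken from a real reply):
            #   [start_slot, end_slot, [master_host, master_port, ...], [replica_host, replica_port, ...], ...]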
for slot in cluster_slots: master_node = slot[2] if master_node[0] == '': master_node[0] = node['host'] master_node[1] = int(master_node[1]) node, node_name = self.make_node_obj(master_node[0], master_node[1], 'master') nodes_cache[node_name] = node for i in range(int(slot[0]), int(slot[1]) + 1): if i not in tmp_slots: tmp_slots[i] = [node] slave_nodes = [slot[j] for j in range(3, len(slot))] for slave_node in slave_nodes: target_slave_node, slave_node_name = self.make_node_obj(slave_node[0], slave_node[1], 'slave') nodes_cache[slave_node_name] = target_slave_node tmp_slots[i].append(target_slave_node) else: # Validate that 2 nodes want to use the same slot cache setup if tmp_slots[i][0]['name'] != node['name']: disagreements.append("{0} vs {1} on slot: {2}".format( tmp_slots[i][0]['name'], node['name'], i), ) if len(disagreements) > 5: raise RedisClusterException("startup_nodes could not agree on a valid slots cache. {0}".format(", ".join(disagreements))) self.populate_startup_nodes() self.refresh_table_asap = False if self._skip_full_coverage_check: need_full_slots_coverage = False else: need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache) # Validate if all slots are covered or if we should try next startup node for i in range(0, self.RedisClusterHashSlots): if i not in tmp_slots and need_full_slots_coverage: all_slots_covered = False if all_slots_covered: # All slots are covered and application can continue to execute break if not startup_nodes_reachable: raise RedisClusterException("Redis Cluster cannot be connected. Please provide at least one reachable node.") if not all_slots_covered: raise RedisClusterException("All slots are not covered after query all startup_nodes. {0} of {1} covered...".format( len(tmp_slots), self.RedisClusterHashSlots)) # Set the tmp variables to the real variables self.slots = tmp_slots self.nodes = nodes_cache self.reinitialize_counter = 0 def increment_reinitialize_counter(self, ct=1): for i in range(1, ct): self.reinitialize_counter += 1 if self.reinitialize_counter % self.reinitialize_steps == 0: self.initialize() def cluster_require_full_coverage(self, nodes_cache): """ if exists 'cluster-require-full-coverage no' config on redis servers, then even all slots are not covered, cluster still will be able to respond """ nodes = nodes_cache or self.nodes def node_require_full_coverage(node): try: r_node = self.get_redis_link(host=node["host"], port=node["port"], decode_responses=True) return "yes" in r_node.config_get("cluster-require-full-coverage").values() except ConnectionError: return False except Exception: raise RedisClusterException("ERROR sending 'config get cluster-require-full-coverage' command to redis server: {0}".format(node)) # at least one node should have cluster-require-full-coverage yes return any(node_require_full_coverage(node) for node in nodes.values()) def set_node_name(self, n): """ Format the name for the given node object # TODO: This shold not be constructed this way. It should update the name of the node in the node cache dict """ if "name" not in n: n["name"] = "{0}:{1}".format(n["host"], n["port"]) def make_node_obj(self, host, port, server_type): """ Create a node datastructure. Returns the node datastructure and the node name """ node_name = "{0}:{1}".format(host, port) node = { 'host': host, 'port': port, 'name': node_name, 'server_type': server_type } return (node, node_name) def set_node(self, host, port, server_type=None): """ Update data for a node. 
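        For example (taken from how execute_command() reacts to a MOVED reply),
        the client calls set_node(e.host, e.port, server_type='master') to record
        the new owner of the redirected slot.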
""" node, node_name = self.make_node_obj(host, port, server_type) self.nodes[node_name] = node return node def populate_startup_nodes(self): """ Do something with all startup nodes and filters out any duplicates """ for item in self.startup_nodes: self.set_node_name(item) for n in self.nodes.values(): if n not in self.startup_nodes: self.startup_nodes.append(n) # freeze it so we can set() it uniq = {frozenset(node.items()) for node in self.startup_nodes} # then thaw it back out into a list of dicts self.startup_nodes = [dict(node) for node in uniq] def reset(self): """ Drop all node data and start over from startup_nodes """ self.initialize() redis-py-cluster-2.0.0/rediscluster/pipeline.py000066400000000000000000000435241352661744600216410ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib import sys # rediscluster imports from .client import RedisCluster from .exceptions import ( RedisClusterException, AskError, MovedError, TryAgainError, ) from .utils import clusterdown_wrapper, dict_merge # 3rd party imports from redis import Redis from redis.exceptions import ConnectionError, RedisError, TimeoutError from redis._compat import imap, unicode ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError, MovedError, AskError, TryAgainError) class ClusterPipeline(RedisCluster): """ """ def __init__(self, connection_pool, result_callbacks=None, response_callbacks=None, startup_nodes=None, read_from_replicas=False): """ """ self.command_stack = [] self.refresh_table_asap = False self.connection_pool = connection_pool self.result_callbacks = result_callbacks or self.__class__.RESULT_CALLBACKS.copy() self.startup_nodes = startup_nodes if startup_nodes else [] self.read_from_replicas = read_from_replicas self.nodes_flags = self.__class__.NODES_FLAGS.copy() self.response_callbacks = dict_merge(response_callbacks or self.__class__.RESPONSE_CALLBACKS.copy(), self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) def __repr__(self): """ """ return "{0}".format(type(self).__name__) def __enter__(self): """ """ return self def __exit__(self, exc_type, exc_value, traceback): """ """ self.reset() def __del__(self): """ """ self.reset() def __len__(self): """ """ return len(self.command_stack) def execute_command(self, *args, **kwargs): """ """ return self.pipeline_execute_command(*args, **kwargs) def pipeline_execute_command(self, *args, **options): """ """ self.command_stack.append(PipelineCommand(args, options, len(self.command_stack))) return self def raise_first_error(self, stack): """ """ for c in stack: r = c.result if isinstance(r, Exception): self.annotate_exception(r, c.position + 1, c.args) raise r def annotate_exception(self, exception, number, command): """ """ cmd = unicode(' ').join(imap(unicode, command)) msg = unicode('Command # {0} ({1}) of pipeline caused error: {2}').format( number, cmd, unicode(exception.args[0])) exception.args = (msg,) + exception.args[1:] def execute(self, raise_on_error=True): """ """ stack = self.command_stack if not stack: return [] try: return self.send_cluster_commands(stack, raise_on_error) finally: self.reset() def reset(self): """ Reset back to empty pipeline. 
""" self.command_stack = [] self.scripts = set() # TODO: Implement # make sure to reset the connection state in the event that we were # watching something # if self.watching and self.connection: # try: # # call this manually since our unwatch or # # immediate_execute_command methods can call reset() # self.connection.send_command('UNWATCH') # self.connection.read_response() # except ConnectionError: # # disconnect will also remove any previous WATCHes # self.connection.disconnect() # clean up the other instance attributes self.watching = False self.explicit_transaction = False # TODO: Implement # we can safely return the connection to the pool here since we're # sure we're no longer WATCHing anything # if self.connection: # self.connection_pool.release(self.connection) # self.connection = None @clusterdown_wrapper def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True): """ Send a bunch of cluster commands to the redis cluster. `allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses automatically. If set to false it will raise RedisClusterException. """ # the first time sending the commands we send all of the commands that were queued up. # if we have to run through it again, we only retry the commands that failed. attempt = sorted(stack, key=lambda x: x.position) # build a list of node objects based on node names we need to nodes = {} # as we move through each command that still needs to be processed, # we figure out the slot number that command maps to, then from the slot determine the node. for c in attempt: # refer to our internal node -> slot table that tells us where a given # command should route to. slot = self._determine_slot(*c.args) node = self.connection_pool.get_node_by_slot(slot) # little hack to make sure the node name is populated. probably could clean this up. self.connection_pool.nodes.set_node_name(node) # now that we know the name of the node ( it's just a string in the form of host:port ) # we can build a list of commands for each node. node_name = node['name'] if node_name not in nodes: nodes[node_name] = NodeCommands(self.parse_response, self.connection_pool.get_connection_by_node(node)) nodes[node_name].append(c) # send the commands in sequence. # we write to all the open sockets for each node first, before reading anything # this allows us to flush all the requests out across the network essentially in parallel # so that we can read them all in parallel as they come back. # we dont' multiplex on the sockets as they come available, but that shouldn't make too much difference. node_commands = nodes.values() for n in node_commands: n.write() for n in node_commands: n.read() # release all of the redis connections we allocated earlier back into the connection pool. # we used to do this step as part of a try/finally block, but it is really dangerous to # release connections back into the pool if for some reason the socket has data still left in it # from a previous operation. The write and read operations already have try/catch around them for # all known types of errors including connection and socket level errors. # So if we hit an exception, something really bad happened and putting any of # these connections back into the pool is a very bad idea. # the socket might have unread buffer still sitting in it, and then the # next time we read from it we pass the buffered result back from a previous # command and every single request after to that connection will always get # a mismatched result. 
(not just theoretical, I saw this happen on production x.x). for n in nodes.values(): self.connection_pool.release(n.connection) # if the response isn't an exception it is a valid response from the node # we're all done with that command, YAY! # if we have more commands to attempt, we've run into problems. # collect all the commands we are allowed to retry. # (MOVED, ASK, or connection errors or timeout errors) attempt = sorted([c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)], key=lambda x: x.position) if attempt and allow_redirections: # RETRY MAGIC HAPPENS HERE! # send these remaing comamnds one at a time using `execute_command` # in the main client. This keeps our retry logic in one place mostly, # and allows us to be more confident in correctness of behavior. # at this point any speed gains from pipelining have been lost # anyway, so we might as well make the best attempt to get the correct # behavior. # # The client command will handle retries for each individual command # sequentially as we pass each one into `execute_command`. Any exceptions # that bubble out should only appear once all retries have been exhausted. # # If a lot of commands have failed, we'll be setting the # flag to rebuild the slots table from scratch. So MOVED errors should # correct themselves fairly quickly. self.connection_pool.nodes.increment_reinitialize_counter(len(attempt)) for c in attempt: try: # send each command individually like we do in the main client. c.result = super(ClusterPipeline, self).execute_command(*c.args, **c.options) except RedisError as e: c.result = e # turn the response back into a simple flat array that corresponds # to the sequence of commands issued in the stack in pipeline.execute() response = [c.result for c in sorted(stack, key=lambda x: x.position)] if raise_on_error: self.raise_first_error(stack) return response def _fail_on_redirect(self, allow_redirections): """ """ if not allow_redirections: raise RedisClusterException("ASK & MOVED redirection not allowed in this pipeline") def multi(self): """ """ raise RedisClusterException("method multi() is not implemented") def immediate_execute_command(self, *args, **options): """ """ raise RedisClusterException("method immediate_execute_command() is not implemented") def _execute_transaction(self, *args, **kwargs): """ """ raise RedisClusterException("method _execute_transaction() is not implemented") def load_scripts(self): """ """ raise RedisClusterException("method load_scripts() is not implemented") def watch(self, *names): """ """ raise RedisClusterException("method watch() is not implemented") def unwatch(self): """ """ raise RedisClusterException("method unwatch() is not implemented") def script_load_for_pipeline(self, *args, **kwargs): """ """ raise RedisClusterException("method script_load_for_pipeline() is not implemented") def delete(self, *names): """ "Delete a key specified by ``names``" """ if len(names) != 1: raise RedisClusterException("deleting multiple keys is not implemented in pipeline command") return self.execute_command('DEL', names[0]) def block_pipeline_command(func): """ Prints error because some pipelined commands should be blocked when running in cluster-mode """ def inner(*args, **kwargs): raise RedisClusterException("ERROR: Calling pipelined function {0} is blocked when running redis in cluster mode...".format(func.__name__)) return inner # Blocked pipeline commands ClusterPipeline.bgrewriteaof = block_pipeline_command(Redis.bgrewriteaof) ClusterPipeline.bgsave = 
block_pipeline_command(Redis.bgsave) ClusterPipeline.bitop = block_pipeline_command(Redis.bitop) ClusterPipeline.brpoplpush = block_pipeline_command(Redis.brpoplpush) ClusterPipeline.client_getname = block_pipeline_command(Redis.client_getname) ClusterPipeline.client_kill = block_pipeline_command(Redis.client_kill) ClusterPipeline.client_list = block_pipeline_command(Redis.client_list) ClusterPipeline.client_setname = block_pipeline_command(Redis.client_setname) ClusterPipeline.config_get = block_pipeline_command(Redis.config_get) ClusterPipeline.config_resetstat = block_pipeline_command(Redis.config_resetstat) ClusterPipeline.config_rewrite = block_pipeline_command(Redis.config_rewrite) ClusterPipeline.config_set = block_pipeline_command(Redis.config_set) ClusterPipeline.dbsize = block_pipeline_command(Redis.dbsize) ClusterPipeline.echo = block_pipeline_command(Redis.echo) ClusterPipeline.evalsha = block_pipeline_command(Redis.evalsha) ClusterPipeline.flushall = block_pipeline_command(Redis.flushall) ClusterPipeline.flushdb = block_pipeline_command(Redis.flushdb) ClusterPipeline.info = block_pipeline_command(Redis.info) ClusterPipeline.keys = block_pipeline_command(Redis.keys) ClusterPipeline.lastsave = block_pipeline_command(Redis.lastsave) ClusterPipeline.mget = block_pipeline_command(Redis.mget) ClusterPipeline.move = block_pipeline_command(Redis.move) ClusterPipeline.mset = block_pipeline_command(Redis.mset) ClusterPipeline.msetnx = block_pipeline_command(Redis.msetnx) ClusterPipeline.pfmerge = block_pipeline_command(Redis.pfmerge) ClusterPipeline.pfcount = block_pipeline_command(Redis.pfcount) ClusterPipeline.ping = block_pipeline_command(Redis.ping) ClusterPipeline.publish = block_pipeline_command(Redis.publish) ClusterPipeline.randomkey = block_pipeline_command(Redis.randomkey) ClusterPipeline.rename = block_pipeline_command(Redis.rename) ClusterPipeline.renamenx = block_pipeline_command(Redis.renamenx) ClusterPipeline.rpoplpush = block_pipeline_command(Redis.rpoplpush) ClusterPipeline.save = block_pipeline_command(Redis.save) ClusterPipeline.scan = block_pipeline_command(Redis.scan) ClusterPipeline.script_exists = block_pipeline_command(Redis.script_exists) ClusterPipeline.script_flush = block_pipeline_command(Redis.script_flush) ClusterPipeline.script_kill = block_pipeline_command(Redis.script_kill) ClusterPipeline.script_load = block_pipeline_command(Redis.script_load) ClusterPipeline.sdiff = block_pipeline_command(Redis.sdiff) ClusterPipeline.sdiffstore = block_pipeline_command(Redis.sdiffstore) ClusterPipeline.sentinel_get_master_addr_by_name = block_pipeline_command(Redis.sentinel_get_master_addr_by_name) ClusterPipeline.sentinel_master = block_pipeline_command(Redis.sentinel_master) ClusterPipeline.sentinel_masters = block_pipeline_command(Redis.sentinel_masters) ClusterPipeline.sentinel_monitor = block_pipeline_command(Redis.sentinel_monitor) ClusterPipeline.sentinel_remove = block_pipeline_command(Redis.sentinel_remove) ClusterPipeline.sentinel_sentinels = block_pipeline_command(Redis.sentinel_sentinels) ClusterPipeline.sentinel_set = block_pipeline_command(Redis.sentinel_set) ClusterPipeline.sentinel_slaves = block_pipeline_command(Redis.sentinel_slaves) ClusterPipeline.shutdown = block_pipeline_command(Redis.shutdown) ClusterPipeline.sinter = block_pipeline_command(Redis.sinter) ClusterPipeline.sinterstore = block_pipeline_command(Redis.sinterstore) ClusterPipeline.slaveof = block_pipeline_command(Redis.slaveof) ClusterPipeline.slowlog_get = 
block_pipeline_command(Redis.slowlog_get) ClusterPipeline.slowlog_len = block_pipeline_command(Redis.slowlog_len) ClusterPipeline.slowlog_reset = block_pipeline_command(Redis.slowlog_reset) ClusterPipeline.smove = block_pipeline_command(Redis.smove) ClusterPipeline.sort = block_pipeline_command(Redis.sort) ClusterPipeline.sunion = block_pipeline_command(Redis.sunion) ClusterPipeline.sunionstore = block_pipeline_command(Redis.sunionstore) ClusterPipeline.time = block_pipeline_command(Redis.time) class PipelineCommand(object): """ """ def __init__(self, args, options=None, position=None): self.args = args if options is None: options = {} self.options = options self.position = position self.result = None self.node = None self.asking = False class NodeCommands(object): """ """ def __init__(self, parse_response, connection): """ """ self.parse_response = parse_response self.connection = connection self.commands = [] def append(self, c): """ """ self.commands.append(c) def write(self): """ Code borrowed from Redis so it can be fixed """ connection = self.connection commands = self.commands # We are going to clobber the commands with the write, so go ahead # and ensure that nothing is sitting there from a previous run. for c in commands: c.result = None # build up all commands into a single request to increase network perf # send all the commands and catch connection and timeout errors. try: connection.send_packed_command(connection.pack_commands([c.args for c in commands])) except (ConnectionError, TimeoutError) as e: for c in commands: c.result = e def read(self): """ """ connection = self.connection for c in self.commands: # if there is a result on this command, it means we ran into an exception # like a connection error. Trying to parse a response on a connection that # is no longer open will result in a connection error raised by redis-py. # but redis-py doesn't check in parse_response that the sock object is # still set and if you try to read from a closed connection, it will # result in an AttributeError because it will do a readline() call on None. # This can have all kinds of nasty side-effects. # Treating this case as a connection error is fine because it will dump # the connection object back into the pool and on the next write, it will # explicitly open the connection and all will be well. if c.result is None: try: c.result = self.parse_response(connection, c.args[0], **c.options) except (ConnectionError, TimeoutError) as e: for c in self.commands: c.result = e return except RedisError: c.result = sys.exc_info()[1] redis-py-cluster-2.0.0/rediscluster/pubsub.py000066400000000000000000000022111352661744600213200ustar00rootroot00000000000000# -*- coding: utf-8 -*- # 3rd party imports from redis.client import PubSub class ClusterPubSub(PubSub): """ Wrapper for PubSub class. """ def __init__(self, *args, **kwargs): super(ClusterPubSub, self).__init__(*args, **kwargs) def execute_command(self, *args, **kwargs): """ Execute a publish/subscribe command. Taken code from redis-py and tweak to make it work within a cluster. 
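        A minimal usage sketch (hypothetical channel name; assumes ``rc`` is an
        already connected RedisCluster client). The ``subscribe`` call below is
        dispatched through this method:

            p = rc.pubsub()
            p.subscribe('my-channel')
            message = p.get_message()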
""" # NOTE: don't parse the response in this function -- it could pull a # legitimate message off the stack if the connection is already # subscribed to one or more channels if self.connection is None: self.connection = self.connection_pool.get_connection( 'pubsub', self.shard_hint, channel=args[1], ) # register a callback that re-subscribes to any channels we # were listening to when we were disconnected self.connection.register_connect_callback(self.on_connect) connection = self.connection self._execute(connection, connection.send_command, *args) redis-py-cluster-2.0.0/rediscluster/utils.py000066400000000000000000000155471352661744600212000ustar00rootroot00000000000000# -*- coding: utf-8 -*- from socket import gethostbyaddr from functools import wraps # rediscluster imports from .exceptions import ( RedisClusterException, ClusterDownError ) # 3rd party imports from redis._compat import basestring, nativestr def bool_ok(response, *args, **kwargs): """ Borrowed from redis._compat becuase that method to not support extra arguments when used in a cluster environment. """ return nativestr(response) == 'OK' def string_keys_to_dict(key_strings, callback): """ Maps each string in `key_strings` to `callback` function and return as a dict. """ return dict.fromkeys(key_strings, callback) def dict_merge(*dicts): """ Merge all provided dicts into 1 dict. """ merged = {} for d in dicts: if not isinstance(d, dict): raise ValueError('Value should be of dict type') else: merged.update(d) return merged def blocked_command(self, command): """ Raises a `RedisClusterException` mentioning the command is blocked. """ raise RedisClusterException("Command: {0} is blocked in redis cluster mode".format(command)) def merge_result(command, res): """ Merge all items in `res` into a list. This command is used when sending a command to multiple nodes and they result from each node should be merged into a single list. """ if not isinstance(res, dict): raise ValueError('Value should be of dict type') result = set([]) for _, v in res.items(): for value in v: result.add(value) return list(result) def first_key(command, res): """ Returns the first result for the given command. If more then 1 result is returned then a `RedisClusterException` is raised. """ if not isinstance(res, dict): raise ValueError('Value should be of dict type') if len(res.keys()) != 1: raise RedisClusterException("More then 1 result from command: {0}".format(command)) return list(res.values())[0] def clusterdown_wrapper(func): """ Wrapper for CLUSTERDOWN error handling. If the cluster reports it is down it is assumed that: - connection_pool was disconnected - connection_pool was reseted - refereh_table_asap set to True It will try 3 times to rerun the command and raises ClusterDownException if it continues to fail. """ @wraps(func) def inner(*args, **kwargs): for _ in range(0, 3): try: return func(*args, **kwargs) except ClusterDownError: # Try again with the new cluster setup. All other errors # should be raised. pass # If it fails 3 times then raise exception back to caller raise ClusterDownError("CLUSTERDOWN error. 
Unable to rebuild the cluster") return inner def nslookup(node_ip): """ """ if ':' not in node_ip: return gethostbyaddr(node_ip)[0] ip, port = node_ip.split(':') return '{0}:{1}'.format(gethostbyaddr(ip)[0], port) def parse_cluster_slots(resp, **options): """ """ current_host = options.get('current_host', '') def fix_server(*args): return (nativestr(args[0]) or current_host, args[1]) slots = {} for slot in resp: start, end, master = slot[:3] slaves = slot[3:] slots[start, end] = { 'master': fix_server(*master), 'slaves': [fix_server(*slave) for slave in slaves], } return slots def parse_cluster_nodes(resp, **options): """ @see: http://redis.io/commands/cluster-nodes # string @see: http://redis.io/commands/cluster-slaves # list of string """ resp = nativestr(resp) current_host = options.get('current_host', '') def parse_slots(s): slots, migrations = [], [] for r in s.split(' '): if '->-' in r: slot_id, dst_node_id = r[1:-1].split('->-', 1) migrations.append({ 'slot': int(slot_id), 'node_id': dst_node_id, 'state': 'migrating' }) elif '-<-' in r: slot_id, src_node_id = r[1:-1].split('-<-', 1) migrations.append({ 'slot': int(slot_id), 'node_id': src_node_id, 'state': 'importing' }) elif '-' in r: start, end = r.split('-') slots.extend(range(int(start), int(end) + 1)) else: slots.append(int(r)) return slots, migrations if isinstance(resp, basestring): resp = resp.splitlines() nodes = [] for line in resp: parts = line.split(' ', 8) self_id, addr, flags, master_id, ping_sent, \ pong_recv, config_epoch, link_state = parts[:8] host, ports = addr.rsplit(':', 1) port, _, cluster_port = ports.partition('@') node = { 'id': self_id, 'host': host or current_host, 'port': int(port), 'cluster-bus-port': int(cluster_port) if cluster_port else 10000 + int(port), 'flags': tuple(flags.split(',')), 'master': master_id if master_id != '-' else None, 'ping-sent': int(ping_sent), 'pong-recv': int(pong_recv), 'link-state': link_state, 'slots': [], 'migrations': [], } if len(parts) >= 9: slots, migrations = parse_slots(parts[8]) node['slots'], node['migrations'] = tuple(slots), migrations nodes.append(node) return nodes def parse_pubsub_channels(command, res, **options): """ Result callback, handles different return types switchable by the `aggregate` flag. """ aggregate = options.get('aggregate', True) if not aggregate: return res return merge_result(command, res) def parse_pubsub_numpat(command, res, **options): """ Result callback, handles different return types switchable by the `aggregate` flag. """ aggregate = options.get('aggregate', True) if not aggregate: return res numpat = 0 for node, node_numpat in res.items(): numpat += node_numpat return numpat def parse_pubsub_numsub(command, res, **options): """ Result callback, handles different return types switchable by the `aggregate` flag. 
""" aggregate = options.get('aggregate', True) if not aggregate: return res numsub_d = dict() for _, numsub_tups in res.items(): for channel, numsubbed in numsub_tups: try: numsub_d[channel] += numsubbed except KeyError: numsub_d[channel] = numsubbed ret_numsub = [] for channel, numsub in numsub_d.items(): ret_numsub.append((channel, numsub)) return ret_numsub redis-py-cluster-2.0.0/requirements.txt000066400000000000000000000000241352661744600202220ustar00rootroot00000000000000redis>=3.0.0,<3.1.0 redis-py-cluster-2.0.0/setup.cfg000066400000000000000000000002071352661744600165620ustar00rootroot00000000000000[bdist_wheel] universal=1 [metadata] license_file = LICENSE [pycodestyle] show-source = 1 exclude = .venv,.tox,dist,docs,build,*.egg redis-py-cluster-2.0.0/setup.py000066400000000000000000000042271352661744600164610ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib import os try: from setuptools import setup except ImportError: from distutils.core import setup # if you are using vagrant, just delete os.link directly, # The hard link only saves a little disk space, so you should not care if os.getenv('USER', '').lower() == 'vagrant': del os.link with open('README.md') as f: readme = f.read() with open(os.path.join('docs', 'release-notes.rst')) as f: history = f.read() setup( name="redis-py-cluster", version="2.0.0", description="Library for communicating with Redis Clusters. Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", author="Johan Andersson", author_email="Grokzen@gmail.com", maintainer='Johan Andersson', maintainer_email='Grokzen@gmail.com', packages=["rediscluster"], url='http://github.com/grokzen/redis-py-cluster', license='MIT', install_requires=[ 'redis>=3.0.0,<3.1.0' ], python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", extras_require={ 'hiredis': [ "hiredis>=0.1.3", ], }, keywords=[ 'redis', 'redis cluster', ], classifiers=[ # As from https://pypi.python.org/pypi?%3Aaction=list_classifiers # 'Development Status :: 1 - Planning', # 'Development Status :: 2 - Pre-Alpha', # 'Development Status :: 3 - Alpha', # 'Development Status :: 4 - Beta', 'Development Status :: 5 - Production/Stable', # 'Development Status :: 6 - Mature', # 'Development Status :: 7 - Inactive', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Environment :: Web Environment', 'Operating System :: POSIX', 'License :: OSI Approved :: MIT License', ] ) redis-py-cluster-2.0.0/tests/000077500000000000000000000000001352661744600161045ustar00rootroot00000000000000redis-py-cluster-2.0.0/tests/__init__.py000066400000000000000000000000001352661744600202030ustar00rootroot00000000000000redis-py-cluster-2.0.0/tests/conftest.py000066400000000000000000000155671352661744600203210ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib import os import sys import json # rediscluster imports from rediscluster import RedisCluster # 3rd party imports import pytest from distutils.version import StrictVersion from mock import Mock from redis import Redis from redis.exceptions import ResponseError # put our path in front so we can be sure we are testing locally not against the global package basepath = 
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(1, basepath) _REDIS_VERSIONS = {} def get_version(**kwargs): params = {'host': 'localhost', 'port': 7000} params.update(kwargs) key = '%s:%s' % (params['host'], params['port']) if key not in _REDIS_VERSIONS: client = RedisCluster(**params) # INFO command returns for all nodes but we only care for port 7000 client_info = client.info() for client_id, client_data in client_info.items(): if '7000' in key: _REDIS_VERSIONS[key] = client_data['redis_version'] client.connection_pool.disconnect() return _REDIS_VERSIONS[key] def _get_client(cls, request=None, **kwargs): params = {'host': 'localhost', 'port': 7000} params.update(kwargs) client = cls(**params) client.flushdb() if request: def teardown(): client.flushdb() client.connection_pool.disconnect() request.addfinalizer(teardown) return client def _init_client(request, cls=None, **kwargs): """ """ client = _get_client(cls=cls, **kwargs) client.flushdb() if request: def teardown(): client.flushdb() client.connection_pool.disconnect() request.addfinalizer(teardown) return client def _init_mgt_client(request, cls=None, **kwargs): """ """ client = _get_client(cls=cls, **kwargs) if request: def teardown(): client.connection_pool.disconnect() request.addfinalizer(teardown) return client def skip_for_no_cluster_impl(): return pytest.mark.skipif(True, reason="Cluster has no or working implementation for this test") def skip_if_not_password_protected_nodes(): """ """ return pytest.mark.skipif('TEST_PASSWORD_PROTECTED' not in os.environ, reason="") def skip_if_server_version_lt(min_version): check = StrictVersion(get_version()) < StrictVersion(min_version) return pytest.mark.skipif(check, reason="") def skip_if_server_version_gte(min_version): check = StrictVersion(get_version()) >= StrictVersion(min_version) return pytest.mark.skipif(check, reason="") def skip_if_redis_py_version_lt(min_version): """ """ import redis version = redis.__version__ if StrictVersion(version) < StrictVersion(min_version): return pytest.mark.skipif(True, reason="") return pytest.mark.skipif(False, reason="") @pytest.fixture() def o(request, *args, **kwargs): """ Create a RedisCluster instance with decode_responses set to True. """ return _init_client(request, cls=RedisCluster, decode_responses=True, **kwargs) @pytest.fixture() def r(request, *args, **kwargs): """ Create a RedisCluster instance with default settings. 
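    A hypothetical test using this fixture (sketch; pytest injects ``r`` by its
    parameter name):

        def test_set_and_get(r):
            assert r.set('foo', 'bar')
            assert r.get('foo') == b'bar'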
""" return _init_client(request, cls=RedisCluster, **kwargs) @pytest.fixture() def ro(request, *args, **kwargs): """ Create a RedisCluster instance with readonly mode """ params = {'readonly_mode': True} params.update(kwargs) return _init_client(request, cls=RedisCluster, **params) @pytest.fixture() def s(*args, **kwargs): """ Create a RedisCluster instance with 'init_slot_cache' set to false """ s = _get_client(RedisCluster, init_slot_cache=False, **kwargs) assert s.connection_pool.nodes.slots == {} assert s.connection_pool.nodes.nodes == {} return s @pytest.fixture() def t(*args, **kwargs): """ Create a regular Redis object instance """ return Redis(*args, **kwargs) @pytest.fixture() def sr(request, *args, **kwargs): """ Returns a instance of RedisCluster """ return _init_client(request, reinitialize_steps=1, cls=RedisCluster, **kwargs) def _gen_cluster_mock_resp(r, response): mock_connection_pool = Mock() connection = Mock() response = response connection.read_response.return_value = response mock_connection_pool.get_connection.return_value = connection r.connection_pool = mock_connection_pool return r @pytest.fixture() def mock_cluster_resp_ok(request, **kwargs): r = _get_client(RedisCluster, request, **kwargs) return _gen_cluster_mock_resp(r, 'OK') @pytest.fixture() def mock_cluster_resp_int(request, **kwargs): r = _get_client(RedisCluster, request, **kwargs) return _gen_cluster_mock_resp(r, '2') @pytest.fixture() def mock_cluster_resp_info(request, **kwargs): r = _get_client(RedisCluster, request, **kwargs) response = ('cluster_state:ok\r\ncluster_slots_assigned:16384\r\n' 'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n' 'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n' 'cluster_size:3\r\ncluster_current_epoch:7\r\n' 'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n' 'cluster_stats_messages_received:105653\r\n') return _gen_cluster_mock_resp(r, response) @pytest.fixture() def mock_cluster_resp_nodes(request, **kwargs): r = _get_client(RedisCluster, request, **kwargs) response = ('c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 ' 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 ' '1447836263059 5 connected\n' '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 ' 'master - 0 1447836264065 0 connected\n' 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 ' 'myself,master - 0 0 2 connected 5461-10922\n' '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 ' 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 ' '1447836262556 3 connected\n' '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 ' 'master - 0 1447836262555 7 connected 0-5460\n' '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 ' 'master - 0 1447836263562 3 connected 10923-16383\n' 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 ' 'master,fail - 1447829446956 1447829444948 1 disconnected\n' ) return _gen_cluster_mock_resp(r, response) @pytest.fixture() def mock_cluster_resp_slaves(request, **kwargs): r = _get_client(RedisCluster, request, **kwargs) response = ("['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 " "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 " "1447836789290 3 connected']") return _gen_cluster_mock_resp(r, response) redis-py-cluster-2.0.0/tests/redis-trib.rb000077500000000000000000001410031352661744600204770ustar00rootroot00000000000000#!/usr/bin/env ruby # TODO (temporary here, we'll move this into the Github issues once # redis-trib initial implementation is completed). 
# # - Make sure that if the rehashing fails in the middle redis-trib will try # to recover. # - When redis-trib performs a cluster check, if it detects a slot move in # progress it should prompt the user to continue the move from where it # stopped. # - Gracefully handle Ctrl+C in move_slot to prompt the user if really stop # while rehashing, and performing the best cleanup possible if the user # forces the quit. # - When doing "fix" set a global Fix to true, and prompt the user to # fix the problem if automatically fixable every time there is something # to fix. For instance: # 1) If there is a node that pretend to receive a slot, or to migrate a # slot, but has no entries in that slot, fix it. # 2) If there is a node having keys in slots that are not owned by it # fix this condition moving the entries in the same node. # 3) Perform more possibly slow tests about the state of the cluster. # 4) When aborted slot migration is detected, fix it. require 'rubygems' require 'redis' ClusterHashSlots = 16384 def xputs(s) case s[0..2] when ">>>" color="29;1" when "[ER" color="31;1" when "[OK" color="32" when "[FA","***" color="33" else color=nil end color = nil if ENV['TERM'] != "xterm" print "\033[#{color}m" if color print s print "\033[0m" if color print "\n" end class ClusterNode def initialize(addr, pass) s = addr.split(":") if s.length < 2 puts "Invalid IP or Port (given as #{addr}) - use IP:Port format" exit 1 end port = s.pop # removes port from split array ip = s.join(":") # if s.length > 1 here, it's IPv6, so restore address @r = nil @info = {} @info[:host] = ip @info[:port] = port @info[:slots] = {} @info[:migrating] = {} @info[:importing] = {} @info[:replicate] = false @info[:password] = pass @dirty = false # True if we need to flush slots info into node. @friends = [] end def friends @friends end def slots @info[:slots] end def has_flag?(flag) @info[:flags].index(flag) end def to_s "#{@info[:host]}:#{@info[:port]}" end def connect(o={}) return if @r print "Connecting to node #{self}: " STDOUT.flush begin if @info[:password] == nil puts "connecting without password" STDOUT.flush @r = Redis.new(:host => @info[:host], :port => @info[:port], :timeout => 60) else puts "connecting with password", @info[:password] STDOUT.flush @r = Redis.new(:host => @info[:host], :port => @info[:port], :password => @info[:password], :timeout => 60) end @r.ping rescue xputs "[ERR] Sorry, can't connect to node #{self}" puts $! exit 1 if o[:abort] @r = nil end xputs "OK" end def assert_cluster info = @r.info if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0 xputs "[ERR] Node #{self} is not configured as a cluster node." exit 1 end end def assert_empty if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) || (@r.info['db0']) xputs "[ERR] Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0." 
exit 1 end end def load_info(o={}) self.connect nodes = @r.cluster("nodes").split("\n") nodes.each{|n| # name addr flags role ping_sent ping_recv link_status slots split = n.split name,addr,flags,master_id,ping_sent,ping_recv,config_epoch,link_status = split[0..6] slots = split[8..-1] info = { :name => name, :addr => addr, :flags => flags.split(","), :replicate => master_id, :ping_sent => ping_sent.to_i, :ping_recv => ping_recv.to_i, :link_status => link_status } info[:replicate] = false if master_id == "-" if info[:flags].index("myself") @info = @info.merge(info) @info[:slots] = {} slots.each{|s| if s[0..0] == '[' if s.index("->-") # Migrating slot,dst = s[1..-1].split("->-") @info[:migrating][slot.to_i] = dst elsif s.index("-<-") # Importing slot,src = s[1..-1].split("-<-") @info[:importing][slot.to_i] = src end elsif s.index("-") start,stop = s.split("-") self.add_slots((start.to_i)..(stop.to_i)) else self.add_slots((s.to_i)..(s.to_i)) end } if slots @dirty = false @r.cluster("info").split("\n").each{|e| k,v=e.split(":") k = k.to_sym v.chop! if k != :cluster_state @info[k] = v.to_i else @info[k] = v end } elsif o[:getfriends] @friends << info end } end def add_slots(slots) slots.each{|s| @info[:slots][s] = :new } @dirty = true end def set_as_replica(node_id) @info[:replicate] = node_id @dirty = true end def flush_node_config return if !@dirty if @info[:replicate] begin @r.cluster("replicate",@info[:replicate]) rescue # If the cluster did not already joined it is possible that # the slave does not know the master node yet. So on errors # we return ASAP leaving the dirty flag set, to flush the # config later. return end else new = [] @info[:slots].each{|s,val| if val == :new new << s @info[:slots][s] = true end } @r.cluster("addslots",*new) end @dirty = false end def info_string # We want to display the hash slots assigned to this node # as ranges, like in: "1-5,8-9,20-25,30" # # Note: this could be easily written without side effects, # we use 'slots' just to split the computation into steps. # First step: we want an increasing array of integers # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30] slots = @info[:slots].keys.sort # As we want to aggregate adjacent slots we convert all the # slot integers into ranges (with just one element) # So we have something like [1..1,2..2, ... and so forth. slots.map!{|x| x..x} # Finally we group ranges with adjacent elements. slots = slots.reduce([]) {|a,b| if !a.empty? && b.first == (a[-1].last)+1 a[0..-2] + [(a[-1].first)..(b.last)] else a + [b] end } # Now our task is easy, we just convert ranges with just one # element into a number, and a real range into a start-end format. # Finally we join the array using the comma as separator. slots = slots.map{|x| x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}" }.join(",") role = self.has_flag?("master") ? "M" : "S" if self.info[:replicate] and @dirty is = "S: #{self.info[:name]} #{self.to_s}" else is = "#{role}: #{self.info[:name]} #{self.to_s}\n"+ " slots:#{slots} (#{self.slots.length} slots) "+ "#{(self.info[:flags]-["myself"]).join(",")}" end if self.info[:replicate] is += "\n replicates #{info[:replicate]}" elsif self.has_flag?("master") && self.info[:replicas] is += "\n #{info[:replicas].length} additional replica(s)" end is end # Return a single string representing nodes and associated slots. # TODO: remove slaves from config when slaves will be handled # by Redis Cluster. 
def get_config_signature config = [] @r.cluster("nodes").each_line{|l| s = l.split slots = s[8..-1].select {|x| x[0..0] != "["} next if slots.length == 0 config << s[0]+":"+(slots.sort.join(",")) } config.sort.join("|") end def info @info end def is_dirty? @dirty end def r @r end end class RedisTrib def initialize @nodes = [] @fix = false @errors = [] end def check_arity(req_args, num_args) if ((req_args > 0 and num_args != req_args) || (req_args < 0 and num_args < req_args.abs)) xputs "[ERR] Wrong number of arguments for specified sub command" exit 1 end end def add_node(node) @nodes << node end def cluster_error(msg) @errors << msg xputs msg end def get_node_by_name(name) @nodes.each{|n| return n if n.info[:name] == name.downcase } return nil end # This function returns the master that has the least number of replicas # in the cluster. If there are multiple masters with the same smaller # number of replicas, one at random is returned. def get_master_with_least_replicas masters = @nodes.select{|n| n.has_flag? "master"} sorted = masters.sort{|a,b| a.info[:replicas].length <=> b.info[:replicas].length } sorted[0] end def check_cluster xputs ">>> Performing Cluster Check (using node #{@nodes[0]})" show_nodes check_config_consistency check_open_slots check_slots_coverage end # Merge slots of every known node. If the resulting slots are equal # to ClusterHashSlots, then all slots are served. def covered_slots slots = {} @nodes.each{|n| slots = slots.merge(n.slots) } slots end def check_slots_coverage xputs ">>> Check slots coverage..." slots = covered_slots if slots.length == ClusterHashSlots xputs "[OK] All #{ClusterHashSlots} slots covered." else cluster_error \ "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes." fix_slots_coverage if @fix end end def check_open_slots xputs ">>> Check for open slots..." open_slots = [] @nodes.each{|n| if n.info[:migrating].size > 0 cluster_error \ "[WARNING] Node #{n} has slots in migrating state (#{n.info[:migrating].keys.join(",")})." open_slots += n.info[:migrating].keys elsif n.info[:importing].size > 0 cluster_error \ "[WARNING] Node #{n} has slots in importing state (#{n.info[:importing].keys.join(",")})." open_slots += n.info[:importing].keys end } open_slots.uniq! if open_slots.length > 0 xputs "[WARNING] The following slots are open: #{open_slots.join(",")}" end if @fix open_slots.each{|slot| fix_open_slot slot} end end def nodes_with_keys_in_slot(slot) nodes = [] @nodes.each{|n| nodes << n if n.r.cluster("getkeysinslot",slot,1).length > 0 } nodes end def fix_slots_coverage not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys xputs ">>> Fixing slots coverage..." xputs "List of not covered slots: " + not_covered.join(",") # For every slot, take action depending on the actual condition: # 1) No node has keys for this slot. # 2) A single node has keys for this slot. # 3) Multiple nodes have keys for this slot. slots = {} not_covered.each{|slot| nodes = nodes_with_keys_in_slot(slot) slots[slot] = nodes xputs "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join}" } none = slots.select {|k,v| v.length == 0} single = slots.select {|k,v| v.length == 1} multi = slots.select {|k,v| v.length > 1} # Handle case "1": keys in no node. if none.length > 0 xputs "The folowing uncovered slots have no keys across the cluster:" xputs none.keys.join(",") yes_or_die "Fix these slots by covering with a random node?" 
none.each{|slot,nodes| node = @nodes.sample xputs ">>> Covering slot #{slot} with #{node}" node.r.cluster("addslots",slot) } end # Handle case "2": keys only in one node. if single.length > 0 xputs "The folowing uncovered slots have keys in just one node:" puts single.keys.join(",") yes_or_die "Fix these slots by covering with those nodes?" single.each{|slot,nodes| xputs ">>> Covering slot #{slot} with #{nodes[0]}" nodes[0].r.cluster("addslots",slot) } end # Handle case "3": keys in multiple nodes. if multi.length > 0 xputs "The folowing uncovered slots have keys in multiple nodes:" xputs multi.keys.join(",") yes_or_die "Fix these slots by moving keys into a single node?" multi.each{|slot,nodes| xputs ">>> Covering slot #{slot} moving keys to #{nodes[0]}" # TODO # 1) Set all nodes as "MIGRATING" for this slot, so that we # can access keys in the hash slot using ASKING. # 2) Move everything to node[0] # 3) Clear MIGRATING from nodes, and ADDSLOTS the slot to # node[0]. raise "TODO: Work in progress" } end end # Return the owner of the specified slot def get_slot_owner(slot) @nodes.each{|n| n.slots.each{|s,_| return n if s == slot } } nil end # Slot 'slot' was found to be in importing or migrating state in one or # more nodes. This function fixes this condition by migrating keys where # it seems more sensible. def fix_open_slot(slot) puts ">>> Fixing open slot #{slot}" # Try to obtain the current slot owner, according to the current # nodes configuration. owner = get_slot_owner(slot) # If there is no slot owner, set as owner the slot with the biggest # number of keys, among the set of migrating / importing nodes. if !owner xputs "*** Fix me, some work to do here." # Select owner... # Use ADDSLOTS to assign the slot. exit 1 end migrating = [] importing = [] @nodes.each{|n| next if n.has_flag? "slave" if n.info[:migrating][slot] migrating << n elsif n.info[:importing][slot] importing << n elsif n.r.cluster("countkeysinslot",slot) > 0 && n != owner xputs "*** Found keys about slot #{slot} in node #{n}!" importing << n end } puts "Set as migrating in: #{migrating.join(",")}" puts "Set as importing in: #{importing.join(",")}" # Case 1: The slot is in migrating state in one slot, and in # importing state in 1 slot. That's trivial to address. if migrating.length == 1 && importing.length == 1 move_slot(migrating[0],importing[0],slot,:verbose=>true,:fix=>true) # Case 2: There are multiple nodes that claim the slot as importing, # they probably got keys about the slot after a restart so opened # the slot. In this case we just move all the keys to the owner # according to the configuration. elsif migrating.length == 0 && importing.length > 0 xputs ">>> Moving all the #{slot} slot keys to its owner #{owner}" importing.each {|node| next if node == owner move_slot(node,owner,slot,:verbose=>true,:fix=>true,:cold=>true) xputs ">>> Setting #{slot} as STABLE in #{node}" node.r.cluster("setslot",slot,"stable") } # Case 3: There are no slots claiming to be in importing state, but # there is a migrating node that actually don't have any key. We # can just close the slot, probably a reshard interrupted in the middle. elsif importing.length == 0 && migrating.length == 1 && migrating[0].r.cluster("getkeysinslot",slot,10).length == 0 migrating[0].r.cluster("setslot",slot,"stable") else xputs "[ERR] Sorry, Redis-trib can't fix this slot yet (work in progress). 
Slot is set as migrating in #{migrating.join(",")}, as importing in #{importing.join(",")}, owner is #{owner}" end end # Check if all the nodes agree about the cluster configuration def check_config_consistency if !is_config_consistent? cluster_error "[ERR] Nodes don't agree about configuration!" else xputs "[OK] All nodes agree about slots configuration." end end def is_config_consistent? signatures=[] @nodes.each{|n| signatures << n.get_config_signature } return signatures.uniq.length == 1 end def wait_cluster_join print "Waiting for the cluster to join" while !is_config_consistent? print "." STDOUT.flush sleep 1 end print "\n" end def alloc_slots nodes_count = @nodes.length masters_count = @nodes.length / (@replicas+1) masters = [] # The first step is to split instances by IP. This is useful as # we'll try to allocate master nodes in different physical machines # (as much as possible) and to allocate slaves of a given master in # different physical machines as well. # # This code assumes just that if the IP is different, than it is more # likely that the instance is running in a different physical host # or at least a different virtual machine. ips = {} @nodes.each{|n| ips[n.info[:host]] = [] if !ips[n.info[:host]] ips[n.info[:host]] << n } # Select master instances puts "Using #{masters_count} masters:" interleaved = [] stop = false while not stop do # Take one node from each IP until we run out of nodes # across every IP. ips.each do |ip,nodes| if nodes.empty? # if this IP has no remaining nodes, check for termination if interleaved.length == nodes_count # stop when 'interleaved' has accumulated all nodes stop = true next end else # else, move one node from this IP to 'interleaved' interleaved.push nodes.shift end end end masters = interleaved.slice!(0, masters_count) nodes_count -= masters.length masters.each{|m| puts m} # Alloc slots on masters slots_per_node = ClusterHashSlots.to_f / masters_count first = 0 cursor = 0.0 masters.each_with_index{|n,masternum| last = (cursor+slots_per_node-1).round if last > ClusterHashSlots || masternum == masters.length-1 last = ClusterHashSlots-1 end last = first if last < first # Min step is 1. n.add_slots first..last first = last+1 cursor += slots_per_node } # Select N replicas for every master. # We try to split the replicas among all the IPs with spare nodes # trying to avoid the host where the master is running, if possible. # # Note we loop two times. The first loop assigns the requested # number of replicas to each master. The second loop assigns any # remaining instances as extra replicas to masters. Some masters # may end up with more than their requested number of replicas, but # all nodes will be used. assignment_verbose = false [:requested,:unused].each do |assign| masters.each do |m| assigned_replicas = 0 while assigned_replicas < @replicas break if nodes_count == 0 if assignment_verbose if assign == :requested puts "Requesting total of #{@replicas} replicas " \ "(#{assigned_replicas} replicas assigned " \ "so far with #{nodes_count} total remaining)." elsif assign == :unused puts "Assigning extra instance to replication " \ "role too (#{nodes_count} remaining)." end end # Return the first node not matching our current master node = interleaved.find{|n| n.info[:host] != m.info[:host]} # If we found a node, use it as a best-first match. # Otherwise, we didn't find a node on a different IP, so we # go ahead and use a same-IP replica. 
if node slave = node interleaved.delete node else slave = interleaved.shift end slave.set_as_replica(m.info[:name]) nodes_count -= 1 assigned_replicas += 1 puts "Adding replica #{slave} to #{m}" # If we are in the "assign extra nodes" loop, # we want to assign one extra replica to each # master before repeating masters. # This break lets us assign extra replicas to masters # in a round-robin way. break if assign == :unused end end end end def flush_nodes_config @nodes.each{|n| n.flush_node_config } end def show_nodes @nodes.each{|n| xputs n.info_string } end # Redis Cluster config epoch collision resolution code is able to eventually # set a different epoch to each node after a new cluster is created, but # it is slow compared to assign a progressive config epoch to each node # before joining the cluster. However we do just a best-effort try here # since if we fail is not a problem. def assign_config_epoch config_epoch = 1 @nodes.each{|n| begin n.r.cluster("set-config-epoch",config_epoch) rescue end config_epoch += 1 } end def join_cluster # We use a brute force approach to make sure the node will meet # each other, that is, sending CLUSTER MEET messages to all the nodes # about the very same node. # Thanks to gossip this information should propagate across all the # cluster in a matter of seconds. first = false @nodes.each{|n| if !first then first = n.info; next; end # Skip the first node n.r.cluster("meet",first[:host],first[:port]) } end def yes_or_die(msg) print "#{msg} (type 'yes' to accept): " STDOUT.flush if !(STDIN.gets.chomp.downcase == "yes") xputs "*** Aborting..." exit 1 end end def load_cluster_info_from_node(nodeaddr) node = ClusterNode.new(nodeaddr) node.connect(:abort => true) node.assert_cluster node.load_info(:getfriends => true) add_node(node) node.friends.each{|f| next if f[:flags].index("noaddr") || f[:flags].index("disconnected") || f[:flags].index("fail") fnode = ClusterNode.new(f[:addr]) fnode.connect() next if !fnode.r begin fnode.load_info() add_node(fnode) rescue => e xputs "[ERR] Unable to load info for node #{fnode}" end } populate_nodes_replicas_info end # This function is called by load_cluster_info_from_node in order to # add additional information to every node as a list of replicas. def populate_nodes_replicas_info # Start adding the new field to every node. @nodes.each{|n| n.info[:replicas] = [] } # Populate the replicas field using the replicate field of slave # nodes. @nodes.each{|n| if n.info[:replicate] master = get_node_by_name(n.info[:replicate]) if !master xputs "*** WARNING: #{n} claims to be slave of unknown node ID #{n.info[:replicate]}." else master.info[:replicas] << n end end } end # Given a list of source nodes return a "resharding plan" # with what slots to move in order to move "numslots" slots to another # instance. def compute_reshard_table(sources,numslots) moved = [] # Sort from bigger to smaller instance, for two reasons: # 1) If we take less slots than instances it is better to start # getting from the biggest instances. # 2) We take one slot more from the first instance in the case of not # perfect divisibility. Like we have 3 nodes and need to get 10 # slots, we take 4 from the first, and 3 from the rest. So the # biggest is always the first. sources = sources.sort{|a,b| b.slots.length <=> a.slots.length} source_tot_slots = sources.inject(0) {|sum,source| sum+source.slots.length } sources.each_with_index{|s,i| # Every node will provide a number of slots proportional to the # slots it has assigned. 
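            # (A worked sketch of the arithmetic below, assuming hypothetical
            #  sources holding 8192, 4096 and 4096 slots and numslots = 100:
            #  the largest source contributes ceil(100 * 8192/16384) = 50 slots,
            #  the other two floor(100 * 4096/16384) = 25 each, totalling 100.)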
n = (numslots.to_f/source_tot_slots*s.slots.length) if i == 0 n = n.ceil else n = n.floor end s.slots.keys.sort[(0...n)].each{|slot| if moved.length < numslots moved << {:source => s, :slot => slot} end } } return moved end def show_reshard_table(table) table.each{|e| puts " Moving slot #{e[:slot]} from #{e[:source].info[:name]}" } end # Move slots between source and target nodes using MIGRATE. # # Options: # :verbose -- Print a dot for every moved key. # :fix -- We are moving in the context of a fix. Use REPLACE. # :cold -- Move keys without opening / reconfiguring the nodes. def move_slot(source,target,slot,o={}) # We start marking the slot as importing in the destination node, # and the slot as migrating in the target host. Note that the order of # the operations is important, as otherwise a client may be redirected # to the target node that does not yet know it is importing this slot. print "Moving slot #{slot} from #{source} to #{target}: "; STDOUT.flush if !o[:cold] target.r.cluster("setslot",slot,"importing",source.info[:name]) source.r.cluster("setslot",slot,"migrating",target.info[:name]) end # Migrate all the keys from source to target using the MIGRATE command while true keys = source.r.cluster("getkeysinslot",slot,10) break if keys.length == 0 keys.each{|key| begin source.r.client.call(["migrate",target.info[:host],target.info[:port],key,0,15000]) rescue => e if o[:fix] && e.to_s =~ /BUSYKEY/ xputs "*** Target key #{key} exists. Replacing it for FIX." source.r.client.call(["migrate",target.info[:host],target.info[:port],key,0,15000,:replace]) else puts "" xputs "[ERR] #{e}" exit 1 end end print "." if o[:verbose] STDOUT.flush } end puts # Set the new node as the owner of the slot in all the known nodes. if !o[:cold] @nodes.each{|n| n.r.cluster("setslot",slot,"node",target.info[:name]) } end end # redis-trib subcommands implementations def check_cluster_cmd(argv,opt) load_cluster_info_from_node(argv[0]) check_cluster end def fix_cluster_cmd(argv,opt) @fix = true load_cluster_info_from_node(argv[0]) check_cluster end def reshard_cluster_cmd(argv,opt) load_cluster_info_from_node(argv[0]) check_cluster if @errors.length != 0 puts "*** Please fix your cluster problems before resharding" exit 1 end # Get number of slots if opt['slots'] numslots = opt['slots'].to_i else numslots = 0 while numslots <= 0 or numslots > ClusterHashSlots print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? " numslots = STDIN.gets.to_i end end # Get the target instance if opt['to'] target = get_node_by_name(opt['to']) if !target || target.has_flag?("slave") xputs "*** The specified node is not known or not a master, please retry." exit 1 end else target = nil while not target print "What is the receiving node ID? " target = get_node_by_name(STDIN.gets.chop) if !target || target.has_flag?("slave") xputs "*** The specified node is not known or not a master, please retry." target = nil end end end # Get the source instances sources = [] if opt['from'] opt['from'].split(',').each{|node_id| if node_id == "all" sources = "all" break end src = get_node_by_name(node_id) if !src || src.has_flag?("slave") xputs "*** The specified node is not known or is not a master, please retry." exit 1 end sources << src } else xputs "Please enter all the source node IDs." xputs " Type 'all' to use all the nodes as source nodes for the hash slots." xputs " Type 'done' once you entered all the source nodes IDs." 
while true print "Source node ##{sources.length+1}:" line = STDIN.gets.chop src = get_node_by_name(line) if line == "done" break elsif line == "all" sources = "all" break elsif !src || src.has_flag?("slave") xputs "*** The specified node is not known or is not a master, please retry." elsif src.info[:name] == target.info[:name] xputs "*** It is not possible to use the target node as source node." else sources << src end end end if sources.length == 0 puts "*** No source nodes given, operation aborted" exit 1 end # Handle soures == all. if sources == "all" sources = [] @nodes.each{|n| next if n.info[:name] == target.info[:name] next if n.has_flag?("slave") sources << n } end # Check if the destination node is the same of any source nodes. if sources.index(target) xputs "*** Target node is also listed among the source nodes!" exit 1 end puts "\nReady to move #{numslots} slots." puts " Source nodes:" sources.each{|s| puts " "+s.info_string} puts " Destination node:" puts " #{target.info_string}" reshard_table = compute_reshard_table(sources,numslots) puts " Resharding plan:" show_reshard_table(reshard_table) if !opt['yes'] print "Do you want to proceed with the proposed reshard plan (yes/no)? " yesno = STDIN.gets.chop exit(1) if (yesno != "yes") end reshard_table.each{|e| move_slot(e[:source],target,e[:slot],:verbose=>true) } end # This is an helper function for create_cluster_cmd that verifies if # the number of nodes and the specified replicas have a valid configuration # where there are at least three master nodes and enough replicas per node. def check_create_parameters masters = @nodes.length/(@replicas+1) if masters < 3 puts "*** ERROR: Invalid configuration for cluster creation." puts "*** Redis Cluster requires at least 3 master nodes." puts "*** This is not possible with #{@nodes.length} nodes and #{@replicas} replicas per node." puts "*** At least #{3*(@replicas+1)} nodes are required." exit 1 end end def create_cluster_cmd(argv,opt) opt = {'replicas' => 0, 'password' => nil}.merge(opt) @replicas = opt['replicas'].to_i xputs ">>> Creating cluster" argv[0..-1].each{|n| node = ClusterNode.new(n, opt['password']) node.connect(:abort => true) node.assert_cluster node.load_info node.assert_empty add_node(node) } check_create_parameters xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..." alloc_slots show_nodes yes_or_die "Can I set the above configuration?" flush_nodes_config xputs ">>> Nodes configuration updated" xputs ">>> Assign a different config epoch to each node" assign_config_epoch xputs ">>> Sending CLUSTER MEET messages to join the cluster" join_cluster # Give one second for the join to start, in order to avoid that # wait_cluster_join will find all the nodes agree about the config as # they are still empty with unassigned slots. sleep 1 wait_cluster_join flush_nodes_config # Useful for the replicas check_cluster end def addnode_cluster_cmd(argv,opt) xputs ">>> Adding node #{argv[0]} to cluster #{argv[1]}" # Check the existing cluster load_cluster_info_from_node(argv[1]) check_cluster # If --master-id was specified, try to resolve it now so that we # abort before starting with the node configuration. 
if opt['slave'] if opt['master-id'] master = get_node_by_name(opt['master-id']) if !master xputs "[ERR] No such master ID #{opt['master-id']}" end else master = get_master_with_least_replicas xputs "Automatically selected master #{master}" end end # Add the new node new = ClusterNode.new(argv[0]) new.connect(:abort => true) new.assert_cluster new.load_info new.assert_empty first = @nodes.first.info add_node(new) # Send CLUSTER MEET command to the new node xputs ">>> Send CLUSTER MEET to node #{new} to make it join the cluster." new.r.cluster("meet",first[:host],first[:port]) # Additional configuration is needed if the node is added as # a slave. if opt['slave'] wait_cluster_join xputs ">>> Configure node as replica of #{master}." new.r.cluster("replicate",master.info[:name]) end xputs "[OK] New node added correctly." end def delnode_cluster_cmd(argv,opt) id = argv[1].downcase xputs ">>> Removing node #{id} from cluster #{argv[0]}" # Load cluster information load_cluster_info_from_node(argv[0]) # Check if the node exists and is not empty node = get_node_by_name(id) if !node xputs "[ERR] No such node ID #{id}" exit 1 end if node.slots.length != 0 xputs "[ERR] Node #{node} is not empty! Reshard data away and try again." exit 1 end # Send CLUSTER FORGET to all the nodes but the node to remove xputs ">>> Sending CLUSTER FORGET messages to the cluster..." @nodes.each{|n| next if n == node if n.info[:replicate] && n.info[:replicate].downcase == id # Reconfigure the slave to replicate with some other node master = get_master_with_least_replicas xputs ">>> #{n} as replica of #{master}" n.r.cluster("replicate",master.info[:name]) end n.r.cluster("forget",argv[1]) } # Finally shutdown the node xputs ">>> SHUTDOWN the node." node.r.shutdown end def set_timeout_cluster_cmd(argv,opt) timeout = argv[1].to_i if timeout < 100 puts "Setting a node timeout of less than 100 milliseconds is a bad idea." exit 1 end # Load cluster information load_cluster_info_from_node(argv[0]) ok_count = 0 err_count = 0 # Send CLUSTER FORGET to all the nodes but the node to remove xputs ">>> Reconfiguring node timeout in every cluster node..." @nodes.each{|n| begin n.r.config("set","cluster-node-timeout",timeout) n.r.config("rewrite") ok_count += 1 xputs "*** New timeout set for #{n}" rescue => e puts "ERR setting node-timeot for #{n}: #{e}" err_count += 1 end } xputs ">>> New node timeout set. #{ok_count} OK, #{err_count} ERR." end def call_cluster_cmd(argv,opt) cmd = argv[1..-1] cmd[0] = cmd[0].upcase # Load cluster information load_cluster_info_from_node(argv[0]) xputs ">>> Calling #{cmd.join(" ")}" @nodes.each{|n| begin res = n.r.send(*cmd) puts "#{n}: #{res}" rescue => e puts "#{n}: #{e}" end } end def import_cluster_cmd(argv,opt) source_addr = opt['from'] xputs ">>> Importing data from #{source_addr} to cluster #{argv[1]}" use_copy = opt['copy'] use_replace = opt['replace'] # Check the existing cluster. load_cluster_info_from_node(argv[0]) check_cluster # Connect to the source node. xputs ">>> Connecting to the source Redis instance" src_host,src_port = source_addr.split(":") source = Redis.new(:host =>src_host, :port =>src_port) if source.info['cluster_enabled'].to_i == 1 xputs "[ERR] The source node should not be a cluster node." end xputs "*** Importing #{source.dbsize} keys from DB 0" # Build a slot -> node map slots = {} @nodes.each{|n| n.slots.each{|s,_| slots[s] = n } } # Use SCAN to iterate over the keys, migrating to the # right node as needed. 
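        # (Sketch of the cursor handling below: each SCAN call returns the next
        #  cursor together with a batch of up to ~1000 keys, and the iteration
        #  stops once the server hands back cursor 0.)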
cursor = nil while cursor != 0 cursor,keys = source.scan(cursor, :count => 1000) cursor = cursor.to_i keys.each{|k| # Migrate keys using the MIGRATE command. slot = key_to_slot(k) target = slots[slot] print "Migrating #{k} to #{target}: " STDOUT.flush begin cmd = ["migrate",target.info[:host],target.info[:port],k,0,15000] cmd << :copy if use_copy cmd << :replace if use_replace source.client.call(cmd) rescue => e puts e else puts "OK" end } end end def help_cluster_cmd(argv,opt) show_help exit 0 end # Parse the options for the specific command "cmd". # Returns an hash populate with option => value pairs, and the index of # the first non-option argument in ARGV. def parse_options(cmd) idx = 1 ; # Current index into ARGV options={} while idx < ARGV.length && ARGV[idx][0..1] == '--' if ARGV[idx][0..1] == "--" option = ARGV[idx][2..-1] idx += 1 if ALLOWED_OPTIONS[cmd] == nil || ALLOWED_OPTIONS[cmd][option] == nil puts "Unknown option '#{option}' for command '#{cmd}'" exit 1 end if ALLOWED_OPTIONS[cmd][option] value = ARGV[idx] idx += 1 else value = true end options[option] = value else # Remaining arguments are not options. break end end # Enforce mandatory options if ALLOWED_OPTIONS[cmd] ALLOWED_OPTIONS[cmd].each {|option,val| if !options[option] && val == :required puts "Option '--#{option}' is required "+ \ "for subcommand '#{cmd}'" exit 1 end } end return options,idx end end ################################################################################# # Libraries # # We try to don't depend on external libs since this is a critical part # of Redis Cluster. ################################################################################# # This is the CRC16 algorithm used by Redis Cluster to hash keys. # Implementation according to CCITT standards. 
# # This is actually the XMODEM CRC 16 algorithm, using the # following parameters: # # Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN" # Width : 16 bit # Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1) # Initialization : 0000 # Reflect Input byte : False # Reflect Output CRC : False # Xor constant to output CRC : 0000 # Output for "123456789" : 31C3 module RedisClusterCRC16 def RedisClusterCRC16.crc16(bytes) crc = 0 bytes.each_byte{|b| crc = ((crc<<8) & 0xffff) ^ XMODEMCRC16Lookup[((crc>>8)^b) & 0xff] } crc end private XMODEMCRC16Lookup = [ 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, 0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, 0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0 ] end # Turn a key name into the corrisponding Redis Cluster slot. def key_to_slot(key) # Only hash what is inside {...} if there is such a pattern in the key. # Note that the specification requires the content that is between # the first { and the first } after the first {. If we found {} without # nothing in the middle, the whole key is hashed as usually. s = key.index "{" if s e = key.index "}",s+1 if e && e != s+1 key = key[s+1..e-1] end end RedisClusterCRC16.crc16(key) % 16384 end ################################################################################# # Definition of commands ################################################################################# COMMANDS={ "create" => ["create_cluster_cmd", -2, "host1:port1 ... 
hostN:portN"], "check" => ["check_cluster_cmd", 2, "host:port"], "fix" => ["fix_cluster_cmd", 2, "host:port"], "reshard" => ["reshard_cluster_cmd", 2, "host:port"], "add-node" => ["addnode_cluster_cmd", 3, "new_host:new_port existing_host:existing_port"], "del-node" => ["delnode_cluster_cmd", 3, "host:port node_id"], "set-timeout" => ["set_timeout_cluster_cmd", 3, "host:port milliseconds"], "call" => ["call_cluster_cmd", -3, "host:port command arg arg .. arg"], "import" => ["import_cluster_cmd", 2, "host:port"], "help" => ["help_cluster_cmd", 1, "(show this help)"] } ALLOWED_OPTIONS={ "create" => {"replicas" => true, "password" => true}, "add-node" => {"slave" => false, "master-id" => true}, "import" => {"from" => :required, "copy" => false, "replace" => false}, "reshard" => {"from" => true, "to" => true, "slots" => true, "yes" => false} } def show_help puts "Usage: redis-trib \n\n" COMMANDS.each{|k,v| o = "" puts " #{k.ljust(15)} #{v[2]}" if ALLOWED_OPTIONS[k] ALLOWED_OPTIONS[k].each{|optname,has_arg| puts " --#{optname}" + (has_arg ? " " : "") } end } puts "\nFor check, fix, reshard, del-node, set-timeout you can specify the host and port of any working node in the cluster.\n" end # Sanity check if ARGV.length == 0 show_help exit 1 end rt = RedisTrib.new cmd_spec = COMMANDS[ARGV[0].downcase] if !cmd_spec puts "Unknown redis-trib subcommand '#{ARGV[0]}'" exit 1 end # Parse options cmd_options,first_non_option = rt.parse_options(ARGV[0].downcase) rt.check_arity(cmd_spec[1],ARGV.length-(first_non_option-1)) # Dispatch rt.send(cmd_spec[0],ARGV[first_non_option..-1],cmd_options) redis-py-cluster-2.0.0/tests/test_cluster_connection_pool.py000066400000000000000000000620721352661744600244550ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib import os import re import time from threading import Thread # rediscluster imports from rediscluster.connection import ( ClusterConnectionPool, ClusterReadOnlyConnectionPool, ClusterConnection, UnixDomainSocketConnection) from rediscluster.exceptions import RedisClusterException from tests.conftest import skip_if_server_version_lt # 3rd party imports import pytest import redis from mock import patch, Mock from redis.connection import ssl_available, to_bool from redis._compat import unicode class DummyConnection(object): description_format = "DummyConnection<>" def __init__(self, host="localhost", port=7000, socket_timeout=None, **kwargs): self.kwargs = kwargs self.pid = os.getpid() self.host = host self.port = port self.socket_timeout = socket_timeout class TestConnectionPool(object): def get_pool(self, connection_kwargs=None, max_connections=None, max_connections_per_node=None, connection_class=DummyConnection, init_slot_cache=True): connection_kwargs = connection_kwargs or {} pool = ClusterConnectionPool( connection_class=connection_class, max_connections=max_connections, max_connections_per_node=max_connections_per_node, startup_nodes=[{"host": "127.0.0.1", "port": 7000}], init_slot_cache=init_slot_cache, **connection_kwargs) return pool def test_in_use_not_exists(self): """ Test that if for some reason, the node that it tries to get the connectino for do not exists in the _in_use_connection variable. 
""" pool = self.get_pool() pool._in_use_connections = {} pool.get_connection("pubsub", channel="foobar") def test_connection_creation(self): connection_kwargs = {'foo': 'bar', 'biz': 'baz'} pool = self.get_pool(connection_kwargs=connection_kwargs) connection = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) assert isinstance(connection, DummyConnection) assert connection.kwargs == connection_kwargs def test_multiple_connections(self): pool = self.get_pool() c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) c2 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) assert c1 != c2 def test_max_connections(self): pool = self.get_pool(max_connections=2) pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) with pytest.raises(RedisClusterException): pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) def test_max_connections_per_node(self): pool = self.get_pool(max_connections=2, max_connections_per_node=True) pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) with pytest.raises(RedisClusterException): pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) def test_max_connections_default_setting(self): pool = self.get_pool(max_connections=None) assert pool.max_connections == 2 ** 31 def test_reuse_previously_released_connection(self): pool = self.get_pool() c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) pool.release(c1) c2 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) assert c1 == c2 def test_repr_contains_db_info_tcp(self): """ Note: init_slot_cache muts be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ connection_kwargs = {'host': 'localhost', 'port': 7000} pool = self.get_pool(connection_kwargs=connection_kwargs, connection_class=ClusterConnection, init_slot_cache=False) expected = 'ClusterConnectionPool>' assert repr(pool) == expected def test_repr_contains_db_info_unix(self): """ Note: init_slot_cache muts be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ connection_kwargs = {'path': '/abc', 'db': 1} pool = self.get_pool(connection_kwargs=connection_kwargs, connection_class=UnixDomainSocketConnection, init_slot_cache=False) expected = 'ClusterConnectionPool>' assert repr(pool) == expected def test_get_connection_by_key(self): """ This test assumes that when hashing key 'foo' will be sent to server with port 7002 """ pool = self.get_pool(connection_kwargs={}) # Patch the call that is made inside the method to allow control of the returned connection object with patch.object(ClusterConnectionPool, 'get_connection_by_slot', autospec=True) as pool_mock: def side_effect(self, *args, **kwargs): return DummyConnection(port=1337) pool_mock.side_effect = side_effect connection = pool.get_connection_by_key("foo", 'GET') assert connection.port == 1337 with pytest.raises(RedisClusterException) as ex: pool.get_connection_by_key(None, None) assert unicode(ex.value).startswith("No way to dispatch this command to Redis Cluster."), True def test_get_connection_by_slot(self): """ This test assumes that when doing keyslot operation on "foo" it will return 12182 """ pool = 
self.get_pool(connection_kwargs={}) # Patch the call that is made inside the method to allow control of the returned connection object with patch.object(ClusterConnectionPool, 'get_connection_by_node', autospec=True) as pool_mock: def side_effect(self, *args, **kwargs): return DummyConnection(port=1337) pool_mock.side_effect = side_effect connection = pool.get_connection_by_slot(12182) assert connection.port == 1337 m = Mock() pool.get_random_connection = m # If None value is provided then a random node should be tried/returned pool.get_connection_by_slot(None) m.assert_called_once_with() def test_get_connection_blocked(self): """ Currently get_connection() should only be used by pubsub command. All other commands should be blocked and exception raised. """ pool = self.get_pool() with pytest.raises(RedisClusterException) as ex: pool.get_connection("GET") assert unicode(ex.value).startswith("Only 'pubsub' commands can be used by get_connection()") def test_master_node_by_slot(self): pool = self.get_pool(connection_kwargs={}) node = pool.get_master_node_by_slot(0) node['port'] = 7000 node = pool.get_master_node_by_slot(12182) node['port'] = 7002 class TestReadOnlyConnectionPool(object): def get_pool(self, connection_kwargs=None, max_connections=None, init_slot_cache=True, startup_nodes=None): startup_nodes = startup_nodes or [{'host': '127.0.0.1', 'port': 7000}] connection_kwargs = connection_kwargs or {} pool = ClusterReadOnlyConnectionPool( init_slot_cache=init_slot_cache, max_connections=max_connections, startup_nodes=startup_nodes, **connection_kwargs) return pool def test_repr_contains_db_info_readonly(self): """ Note: init_slot_cache must be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ pool = self.get_pool( init_slot_cache=False, startup_nodes=[{"host": "127.0.0.1", "port": 7000}, {"host": "127.0.0.2", "port": 7001}], ) expected = 'ClusterReadOnlyConnectionPool, ClusterConnection>' assert repr(pool) == expected def test_max_connections(self): pool = self.get_pool(max_connections=2) pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) with pytest.raises(RedisClusterException): pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) def test_get_connection_by_slot(self): """ """ pool = self.get_pool(connection_kwargs={}) # Patch the call that is made inside the method to allow control of the returned connection object with patch.object(ClusterReadOnlyConnectionPool, 'get_master_connection_by_slot', autospec=True) as pool_mock: def side_effect(self, *args, **kwargs): return DummyConnection(port=1337) pool_mock.side_effect = side_effect # Try a master only command connection = pool.get_connection_by_key("foo", 'ZSCAN') assert connection.port == 1337 with patch.object(ClusterReadOnlyConnectionPool, 'get_random_master_slave_connection_by_slot', autospec=True) as pool_mock: def side_effect(self, *args, **kwargs): return DummyConnection(port=1337) pool_mock.side_effect = side_effect # try a random node command connection = pool.get_connection_by_key('foo', 'GET') assert connection.port == 1337 with pytest.raises(RedisClusterException) as ex: pool.get_connection_by_key(None, None) assert unicode(ex.value).startswith("No way to dispatch this command to Redis Cluster."), True def test_get_node_by_slot_random(self): """ We can randomly get all nodes in readonly mode. 
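        The slot cache is seeded with one master (port 7000) and one slave
        (port 7003) for slot 0, and repeated lookups are expected to return
        both ports over time.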
""" pool = self.get_pool(connection_kwargs={}) # Set the values that we expect to be set for the NodeManager. Represents 2 nodes for 1 specific slot pool.nodes.slots[0] = [ {'host': '172.20.0.2', 'port': 7000, 'name': '172.20.0.2:7000', 'server_type': 'master'}, {'host': '172.20.0.2', 'port': 7003, 'name': '172.20.0.2:7003', 'server_type': 'slave'}, ] expected_ports = {7000, 7003} actual_ports = set() for _ in range(0, 100): node = pool.get_node_by_slot_random(0) actual_ports.add(node['port']) assert actual_ports == expected_ports class TestBlockingConnectionPool(object): def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20): connection_kwargs = connection_kwargs or {} pool = redis.BlockingConnectionPool(connection_class=DummyConnection, max_connections=max_connections, timeout=timeout, **connection_kwargs) return pool def test_connection_creation(self): connection_kwargs = {'foo': 'bar', 'biz': 'baz'} pool = self.get_pool(connection_kwargs=connection_kwargs) connection = pool.get_connection('_') assert isinstance(connection, DummyConnection) assert connection.kwargs == connection_kwargs def test_multiple_connections(self): pool = self.get_pool() c1 = pool.get_connection('_') c2 = pool.get_connection('_') assert c1 != c2 def test_connection_pool_blocks_until_timeout(self): "When out of connections, block for timeout seconds, then raise" pool = self.get_pool(max_connections=1, timeout=0.1) pool.get_connection('_') start = time.time() with pytest.raises(redis.ConnectionError): pool.get_connection('_') # we should have waited at least 0.1 seconds assert time.time() - start >= 0.1 def connection_pool_blocks_until_another_connection_released(self): """ When out of connections, block until another connection is released to the pool """ pool = self.get_pool(max_connections=1, timeout=2) c1 = pool.get_connection('_') def target(): time.sleep(0.1) pool.release(c1) Thread(target=target).start() start = time.time() pool.get_connection('_') assert time.time() - start >= 0.1 def test_reuse_previously_released_connection(self): pool = self.get_pool() c1 = pool.get_connection('_') pool.release(c1) c2 = pool.get_connection('_') assert c1 == c2 def test_repr_contains_db_info_tcp(self): pool = redis.ConnectionPool(host='localhost', port=6379, db=0) expected = 'ConnectionPool>' assert repr(pool) == expected def test_repr_contains_db_info_unix(self): pool = redis.ConnectionPool( connection_class=redis.UnixDomainSocketConnection, path='abc', db=0, ) expected = 'ConnectionPool>' assert repr(pool) == expected class TestConnectionPoolURLParsing(object): def test_defaults(self): pool = redis.ConnectionPool.from_url('redis://localhost') assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 0, 'password': None, } def test_hostname(self): pool = redis.ConnectionPool.from_url('redis://myhost') assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'myhost', 'port': 6379, 'db': 0, 'password': None, } def test_quoted_hostname(self): pool = redis.ConnectionPool.from_url('redis://my %2F host %2B%3D+', decode_components=True) assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'my / host +=+', 'port': 6379, 'db': 0, 'password': None, } def test_port(self): pool = redis.ConnectionPool.from_url('redis://localhost:6380') assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6380, 'db': 0, 'password': 
None, } def test_password(self): pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost') assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 0, 'password': 'mypassword', } def test_quoted_password(self): pool = redis.ConnectionPool.from_url( 'redis://:%2Fmypass%2F%2B word%3D%24+@localhost', decode_components=True) assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 0, 'password': '/mypass/+ word=$+', } def test_quoted_path(self): pool = redis.ConnectionPool.from_url( 'unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket', decode_components=True) assert pool.connection_class == redis.UnixDomainSocketConnection assert pool.connection_kwargs == { 'path': '/my/path/to/../+_+=$ocket', 'db': 0, 'password': 'mypassword', } def test_db_as_argument(self): pool = redis.ConnectionPool.from_url('redis://localhost', db='1') assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 1, 'password': None, } def test_db_in_path(self): pool = redis.ConnectionPool.from_url('redis://localhost/2', db='1') assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 2, 'password': None, } def test_db_in_querystring(self): pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3', db='1') assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 3, 'password': None, } def test_extra_typed_querystring_options(self): pool = redis.ConnectionPool.from_url( 'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10' '&socket_keepalive=&retry_on_timeout=Yes&max_connections=10' ) assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 2, 'socket_timeout': 20.0, 'socket_connect_timeout': 10.0, 'retry_on_timeout': True, 'password': None, } assert pool.max_connections == 10 def test_boolean_parsing(self): for expected, value in ( (None, None), (None, ''), (False, 0), (False, '0'), (False, 'f'), (False, 'F'), (False, 'False'), (False, 'n'), (False, 'N'), (False, 'No'), (True, 1), (True, '1'), (True, 'y'), (True, 'Y'), (True, 'Yes'), ): assert expected is to_bool(value) def test_extra_querystring_options(self): pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2') assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 0, 'password': None, 'a': '1', 'b': '2' } def test_calling_from_subclass_returns_correct_instance(self): pool = redis.BlockingConnectionPool.from_url('redis://localhost') assert isinstance(pool, redis.BlockingConnectionPool) def test_client_creates_connection_pool(self): r = redis.Redis.from_url('redis://myhost') assert r.connection_pool.connection_class == redis.Connection assert r.connection_pool.connection_kwargs == { 'host': 'myhost', 'port': 6379, 'db': 0, 'password': None, } class TestConnectionPoolUnixSocketURLParsing(object): def test_defaults(self): pool = redis.ConnectionPool.from_url('unix:///socket') assert pool.connection_class == redis.UnixDomainSocketConnection assert pool.connection_kwargs == { 'path': '/socket', 'db': 0, 'password': None, } def test_password(self): pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket') assert pool.connection_class == 
redis.UnixDomainSocketConnection assert pool.connection_kwargs == { 'path': '/socket', 'db': 0, 'password': 'mypassword', } def test_db_as_argument(self): pool = redis.ConnectionPool.from_url('unix:///socket', db=1) assert pool.connection_class == redis.UnixDomainSocketConnection assert pool.connection_kwargs == { 'path': '/socket', 'db': 1, 'password': None, } def test_db_in_querystring(self): pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1) assert pool.connection_class == redis.UnixDomainSocketConnection assert pool.connection_kwargs == { 'path': '/socket', 'db': 2, 'password': None, } def test_extra_querystring_options(self): pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2') assert pool.connection_class == redis.UnixDomainSocketConnection assert pool.connection_kwargs == { 'path': '/socket', 'db': 0, 'password': None, 'a': '1', 'b': '2' } class TestSSLConnectionURLParsing(object): @pytest.mark.skipif(not ssl_available, reason="SSL not installed") def test_defaults(self): pool = redis.ConnectionPool.from_url('rediss://localhost') assert pool.connection_class == redis.SSLConnection assert pool.connection_kwargs == { 'host': 'localhost', 'port': 6379, 'db': 0, 'password': None, } @pytest.mark.skipif(not ssl_available, reason="SSL not installed") def test_cert_reqs_options(self): import ssl pool = redis.ConnectionPool.from_url('rediss://?ssl_cert_reqs=none') assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE pool = redis.ConnectionPool.from_url( 'rediss://?ssl_cert_reqs=optional') assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL pool = redis.ConnectionPool.from_url( 'rediss://?ssl_cert_reqs=required') assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED class TestConnection(object): def test_on_connect_error(self): """ An error in Connection.on_connect should disconnect from the server see for details: https://github.com/andymccurdy/redis-py/issues/368 """ # this assumes the Redis server being tested against doesn't have # 9999 databases ;) bad_connection = redis.Redis(db=9999) # an error should be raised on connect with pytest.raises(redis.RedisError): bad_connection.info() pool = bad_connection.connection_pool assert len(pool._available_connections) == 1 assert not pool._available_connections[0]._sock @skip_if_server_version_lt('2.8.8') def test_busy_loading_disconnects_socket(self, r): """ If Redis raises a LOADING error, the connection should be disconnected and a BusyLoadingError raised """ with pytest.raises(redis.BusyLoadingError): r.execute_command('DEBUG', 'ERROR', 'LOADING fake message') # TODO: Sinc we have to query the cluster before we send this DEBUG command # we will have more then 1 connection in our pool and asserting 1 connection will # not work. pool = r.connection_pool assert len(pool._available_connections) >= 1 # assert not pool._available_connections[0]._sock @pytest.mark.xfail(reason="pipeline NYI") @skip_if_server_version_lt('2.8.8') def test_busy_loading_from_pipeline_immediate_command(self, r): """ BusyLoadingErrors should raise from Pipelines that execute a command immediately, like WATCH does. 
""" pipe = r.pipeline() with pytest.raises(redis.BusyLoadingError): pipe.immediate_execute_command('DEBUG', 'ERROR', 'LOADING fake message') pool = r.connection_pool assert not pipe.connection assert len(pool._available_connections) == 1 assert not pool._available_connections[0]._sock @pytest.mark.xfail(reason="pipeline NYI") @skip_if_server_version_lt('2.8.8') def test_busy_loading_from_pipeline(self, r): """ BusyLoadingErrors should be raised from a pipeline execution regardless of the raise_on_error flag. """ pipe = r.pipeline() pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message') with pytest.raises(redis.BusyLoadingError): pipe.execute() pool = r.connection_pool assert not pipe.connection assert len(pool._available_connections) == 1 assert not pool._available_connections[0]._sock @skip_if_server_version_lt('2.8.8') def test_read_only_error(self, r): "READONLY errors get turned in ReadOnlyError exceptions" with pytest.raises(redis.ReadOnlyError): r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah') def test_connect_from_url_tcp(self): connection = redis.Redis.from_url('redis://localhost') pool = connection.connection_pool assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == ( 'ConnectionPool', 'Connection', 'host=localhost,port=6379,db=0', ) def test_connect_from_url_unix(self): connection = redis.Redis.from_url('unix:///path/to/socket') pool = connection.connection_pool assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == ( 'ConnectionPool', 'UnixDomainSocketConnection', 'path=/path/to/socket,db=0', ) redis-py-cluster-2.0.0/tests/test_cluster_obj.py000066400000000000000000000447651352661744600220500ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib from __future__ import with_statement import re import time # rediscluster imports from rediscluster import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import ( RedisClusterException, MovedError, AskError, ClusterDownError, ) from rediscluster.nodemanager import NodeManager from tests.conftest import _get_client, skip_if_server_version_lt, skip_if_not_password_protected_nodes # 3rd party imports from mock import patch, Mock, MagicMock from redis._compat import unicode from redis import Redis import pytest pytestmark = skip_if_server_version_lt('2.9.0') class DummyConnectionPool(ClusterConnectionPool): pass class DummyConnection(object): pass def get_mocked_redis_client(*args, **kwargs): """ Return a stable RedisCluster object that have deterministic nodes and slots setup to remove the problem of different IP addresses on different installations and machines. 
""" with patch.object(Redis, 'execute_command') as execute_command_mock: def execute_command(self, *_args, **_kwargs): if _args[0] == 'slots': mock_cluster_slots = [ [ 0, 5460, ['127.0.0.1', 7000, 'node_0'], ['127.0.0.1', 7004, 'node_4'] ], [ 5461, 10922, ['127.0.0.1', 7001, 'node_1'], ['127.0.0.1', 7005, 'node_5'] ], [ 10923, 16383, ['127.0.0.1', 7002, 'node_2'], ['127.0.0.1', 7003, '2node_3'] ] ] return mock_cluster_slots elif _args[0] == 'cluster-require-full-coverage': return {'cluster-require-full-coverage': 'yes'} execute_command_mock.side_effect = execute_command return RedisCluster(*args, **kwargs) def test_representation(r): assert re.search('^RedisCluster<[a-z0-9\.\:\,].+>$', str(r)) def test_blocked_strict_redis_args(): """ Some arguments should explicitly be blocked because they will not work in a cluster setup """ params = {'startup_nodes': [{'host': '127.0.0.1', 'port': 7000}]} c = RedisCluster(**params) assert c.connection_pool.connection_kwargs["socket_timeout"] == ClusterConnectionPool.RedisClusterDefaultTimeout with pytest.raises(RedisClusterException) as ex: _get_client(RedisCluster, db=1) assert unicode(ex.value).startswith("Argument 'db' is not possible to use in cluster mode") @skip_if_not_password_protected_nodes() def test_password_procted_nodes(): """ Test that it is possible to connect to password protected nodes """ startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] password_protected_startup_nodes = [{"host": "127.0.0.1", "port": "7100"}] with pytest.raises(RedisClusterException) as ex: _get_client(RedisCluster, startup_nodes=password_protected_startup_nodes) assert unicode(ex.value).startswith("ERROR sending 'cluster slots' command to redis server:") _get_client(RedisCluster, startup_nodes=password_protected_startup_nodes, password='password_is_protected') with pytest.raises(RedisClusterException) as ex: _get_client(RedisCluster, startup_nodes=startup_nodes, password='password_is_protected') assert unicode(ex.value).startswith("ERROR sending 'cluster slots' command to redis server:") _get_client(RedisCluster, startup_nodes=startup_nodes) def test_host_port_startup_node(): """ Test that it is possible to use host & port arguments as startup node args """ h = "192.168.0.1" p = 7000 c = RedisCluster(host=h, port=p, init_slot_cache=False) assert {"host": h, "port": p} in c.connection_pool.nodes.startup_nodes def test_empty_startup_nodes(): """ Test that exception is raised when empty providing empty startup_nodes """ with pytest.raises(RedisClusterException) as ex: r = RedisCluster(startup_nodes=[]) assert unicode(ex.value).startswith("No startup nodes provided"), unicode(ex.value) def test_readonly_instance(ro): """ Test that readonly_mode=True instance has ClusterReadOnlyConnectionPool """ assert isinstance(ro.connection_pool, ClusterReadOnlyConnectionPool) def test_custom_connectionpool(): """ Test that a custom connection pool will be used by RedisCluster """ h = "192.168.0.1" p = 7001 pool = DummyConnectionPool(host=h, port=p, connection_class=DummyConnection, startup_nodes=[{'host': h, 'port': p}], init_slot_cache=False) c = RedisCluster(connection_pool=pool, init_slot_cache=False) assert c.connection_pool is pool assert c.connection_pool.connection_class == DummyConnection assert {"host": h, "port": p} in c.connection_pool.nodes.startup_nodes @patch('rediscluster.nodemanager.Redis', new=MagicMock()) def test_skip_full_coverage_check(): """ Test if the cluster_require_full_coverage NodeManager method was not called with the flag activated """ c = 
RedisCluster("192.168.0.1", 7001, init_slot_cache=False, skip_full_coverage_check=True) c.connection_pool.nodes.cluster_require_full_coverage = MagicMock() c.connection_pool.nodes.initialize() assert not c.connection_pool.nodes.cluster_require_full_coverage.called def test_blocked_commands(r): """ These commands should be blocked and raise RedisClusterException """ blocked_commands = [ "CLIENT SETNAME", "SENTINEL GET-MASTER-ADDR-BY-NAME", 'SENTINEL MASTER', 'SENTINEL MASTERS', 'SENTINEL MONITOR', 'SENTINEL REMOVE', 'SENTINEL SENTINELS', 'SENTINEL SET', 'SENTINEL SLAVES', 'SHUTDOWN', 'SLAVEOF', 'SCRIPT KILL', 'MOVE', 'BITOP', ] for command in blocked_commands: try: r.execute_command(command) except RedisClusterException: pass else: raise AssertionError("'RedisClusterException' not raised for method : {0}".format(command)) def test_blocked_transaction(r): """ Method transaction is blocked/NYI and should raise exception on use """ with pytest.raises(RedisClusterException) as ex: r.transaction(None) assert unicode(ex.value).startswith("method RedisCluster.transaction() is not implemented"), unicode(ex.value) def test_cluster_of_one_instance(): """ Test a cluster that starts with only one redis server and ends up with one server. There is another redis server joining the cluster, hold slot 0, and eventually quit the cluster. The RedisCluster instance may get confused when slots mapping and nodes change during the test. """ with patch.object(RedisCluster, 'parse_response') as parse_response_mock: with patch.object(NodeManager, 'initialize', autospec=True) as init_mock: def side_effect(self, *args, **kwargs): def ok_call(self, *args, **kwargs): assert self.port == 7007 return "OK" parse_response_mock.side_effect = ok_call raise ClusterDownError('CLUSTERDOWN The cluster is down. Use CLUSTER INFO for more information') def side_effect_rebuild_slots_cache(self): # make new node cache that points to 7007 instead of 7006 self.nodes = [{'host': '127.0.0.1', 'server_type': 'master', 'port': 7006, 'name': '127.0.0.1:7006'}] self.slots = {} for i in range(0, 16383): self.slots[i] = [{ 'host': '127.0.0.1', 'server_type': 'master', 'port': 7006, 'name': '127.0.0.1:7006', }] # Second call should map all to 7007 def map_7007(self): self.nodes = [{'host': '127.0.0.1', 'server_type': 'master', 'port': 7007, 'name': '127.0.0.1:7007'}] self.slots = {} for i in range(0, 16383): self.slots[i] = [{ 'host': '127.0.0.1', 'server_type': 'master', 'port': 7007, 'name': '127.0.0.1:7007', }] # First call should map all to 7006 init_mock.side_effect = map_7007 parse_response_mock.side_effect = side_effect init_mock.side_effect = side_effect_rebuild_slots_cache rc = RedisCluster(host='127.0.0.1', port=7006) rc.set("foo", "bar") def test_execute_command_errors(r): """ If no command is given to `_determine_nodes` then exception should be raised. Test that if no key is provided then exception should be raised. """ with pytest.raises(RedisClusterException) as ex: r.execute_command() assert unicode(ex.value).startswith("Unable to determine command to use") with pytest.raises(RedisClusterException) as ex: r.execute_command("GET") assert unicode(ex.value).startswith("No way to dispatch this command to Redis Cluster. Missing key.") def test_refresh_table_asap(): """ If this variable is set externally, initialize() should be called. 
""" with patch.object(NodeManager, 'initialize') as mock_initialize: mock_initialize.return_value = None # Patch parse_response to avoid issues when the cluster sometimes return MOVED with patch.object(RedisCluster, 'parse_response') as mock_parse_response: def side_effect(self, *args, **kwargs): return None mock_parse_response.side_effect = side_effect r = RedisCluster(host="127.0.0.1", port=7000) r.connection_pool.nodes.slots[12182] = [{ "host": "127.0.0.1", "port": 7002, "name": "127.0.0.1:7002", "server_type": "master", }] r.refresh_table_asap = True i = len(mock_initialize.mock_calls) r.execute_command("SET", "foo", "bar") assert len(mock_initialize.mock_calls) - i == 1 assert r.refresh_table_asap is False def find_node_ip_based_on_port(cluster_client, port): for node_name, node_data in cluster_client.connection_pool.nodes.nodes.items(): if node_name.endswith(port): return node_data['host'] def test_ask_redirection(): """ Test that the server handles ASK response. At first call it should return a ASK ResponseError that will point the client to the next server it should talk to. Important thing to verify is that it tries to talk to the second node. """ r = RedisCluster(host="127.0.0.1", port=7000) r.connection_pool.nodes.nodes['127.0.0.1:7001'] = { 'host': u'127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001' } with patch.object(RedisCluster, 'parse_response') as parse_response: host_ip = find_node_ip_based_on_port(r, '7001') def ask_redirect_effect(connection, *args, **options): def ok_response(connection, *args, **options): assert connection.host == host_ip assert connection.port == 7001 return "MOCK_OK" parse_response.side_effect = ok_response raise AskError("1337 {0}:7001".format(host_ip)) parse_response.side_effect = ask_redirect_effect assert r.execute_command("SET", "foo", "bar") == "MOCK_OK" def test_pipeline_ask_redirection(): """ Test that the server handles ASK response when used in pipeline. At first call it should return a ASK ResponseError that will point the client to the next server it should talk to. Important thing to verify is that it tries to talk to the second node. """ r = get_mocked_redis_client(host="127.0.0.1", port=7000) with patch.object(RedisCluster, 'parse_response') as parse_response: def response(connection, *args, **options): def response(connection, *args, **options): def response(connection, *args, **options): assert connection.host == "127.0.0.1" assert connection.port == 7001 return "MOCK_OK" parse_response.side_effect = response raise AskError("12182 127.0.0.1:7001") parse_response.side_effect = response raise AskError("12182 127.0.0.1:7001") parse_response.side_effect = response p = r.pipeline() p.set("foo", "bar") assert p.execute() == ["MOCK_OK"] def test_moved_redirection(): """ Test that the client handles MOVED response. At first call it should return a MOVED ResponseError that will point the client to the next server it should talk to. Important thing to verify is that it tries to talk to the second node. 
""" r = get_mocked_redis_client(host="127.0.0.1", port=7000) m = Mock(autospec=True) def ask_redirect_effect(connection, *args, **options): def ok_response(connection, *args, **options): assert connection.host == "127.0.0.1" assert connection.port == 7002 return "MOCK_OK" m.side_effect = ok_response raise MovedError("12182 127.0.0.1:7002") m.side_effect = ask_redirect_effect r.parse_response = m assert r.set("foo", "bar") == "MOCK_OK" def test_moved_redirection_pipeline(): """ Test that the server handles MOVED response when used in pipeline. At first call it should return a MOVED ResponseError that will point the client to the next server it should talk to. Important thing to verify is that it tries to talk to the second node. """ with patch.object(RedisCluster, 'parse_response') as parse_response: def moved_redirect_effect(connection, *args, **options): def ok_response(connection, *args, **options): assert connection.host == "127.0.0.1" assert connection.port == 7002 return "MOCK_OK" parse_response.side_effect = ok_response raise MovedError("12182 127.0.0.1:7002") parse_response.side_effect = moved_redirect_effect # r = RedisCluster(host="127.0.0.1", port=7000) r = get_mocked_redis_client(host="127.0.0.1", port=7000) p = r.pipeline() p.set("foo", "bar") assert p.execute() == ["MOCK_OK"] def test_access_correct_slave_with_readonly_mode_client(sr): """ Test that the client can get value normally with readonly mode when we connect to correct slave. """ # we assume this key is set on 127.0.0.1:7000(7003) sr.set('foo16706', 'foo') import time time.sleep(1) with patch.object(ClusterReadOnlyConnectionPool, 'get_node_by_slot') as return_slave_mock: return_slave_mock.return_value = { 'name': '127.0.0.1:7003', 'host': '127.0.0.1', 'port': 7003, 'server_type': 'slave', } master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7000', 'port': 7000, 'server_type': 'master'} with patch.object( ClusterConnectionPool, 'get_master_node_by_slot', return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) assert b'foo' == readonly_client.get('foo16706') readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b'foo' == readonly_client.get('foo16706') def test_refresh_using_specific_nodes(r): """ Test making calls on specific nodes when the cluster has failed over to another node """ with patch.object(RedisCluster, 'parse_response') as parse_response_mock: with patch.object(NodeManager, 'initialize', autospec=True) as init_mock: # simulate 7006 as a failed node def side_effect(self, *args, **kwargs): if self.port == 7006: parse_response_mock.failed_calls += 1 raise ClusterDownError('CLUSTERDOWN The cluster is down. 
Use CLUSTER INFO for more information') elif self.port == 7007: parse_response_mock.successful_calls += 1 def side_effect_rebuild_slots_cache(self): # start with all slots mapped to 7006 self.nodes = {'127.0.0.1:7006': {'host': '127.0.0.1', 'server_type': 'master', 'port': 7006, 'name': '127.0.0.1:7006'}} self.slots = {} for i in range(0, 16383): self.slots[i] = [{ 'host': '127.0.0.1', 'server_type': 'master', 'port': 7006, 'name': '127.0.0.1:7006', }] # After the first connection fails, a reinitialize should follow the cluster to 7007 def map_7007(self): self.nodes = {'127.0.0.1:7007': {'host': '127.0.0.1', 'server_type': 'master', 'port': 7007, 'name': '127.0.0.1:7007'}} self.slots = {} for i in range(0, 16383): self.slots[i] = [{ 'host': '127.0.0.1', 'server_type': 'master', 'port': 7007, 'name': '127.0.0.1:7007', }] init_mock.side_effect = map_7007 parse_response_mock.side_effect = side_effect parse_response_mock.successful_calls = 0 parse_response_mock.failed_calls = 0 init_mock.side_effect = side_effect_rebuild_slots_cache rc = RedisCluster(host='127.0.0.1', port=7006) assert len(rc.connection_pool.nodes.nodes) == 1 assert '127.0.0.1:7006' in rc.connection_pool.nodes.nodes rc.ping() # Cluster should now point to 7006, and there should be one failed and one succesful call assert len(rc.connection_pool.nodes.nodes) == 1 assert '127.0.0.1:7007' in rc.connection_pool.nodes.nodes assert parse_response_mock.failed_calls == 1 assert parse_response_mock.successful_calls == 1 redis-py-cluster-2.0.0/tests/test_commands.py000066400000000000000000002742161352661744600213320ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib from __future__ import unicode_literals import datetime import re import time # rediscluster imports import rediscluster from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError from rediscluster.utils import dict_merge from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt, skip_if_server_version_gte, skip_for_no_cluster_impl # 3rd party imports import pytest import redis from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info from redis.exceptions import ResponseError, DataError, RedisError, DataError from redis import exceptions @pytest.fixture() def slowlog(request, r): current_config = get_main_cluster_node_data(r.config_get()) old_slower_than_value = current_config['slowlog-log-slower-than'] old_max_legnth_value = current_config['slowlog-max-len'] def cleanup(): r.config_set('slowlog-log-slower-than', old_slower_than_value) r.config_set('slowlog-max-len', old_max_legnth_value) request.addfinalizer(cleanup) r.config_set('slowlog-log-slower-than', 0) r.config_set('slowlog-max-len', 128) def redis_server_time(client): all_clients_time = client.time() for server_id, server_time_data in all_clients_time.items(): if '7000' in server_id: seconds, milliseconds = server_time_data timestamp = float('%s.%s' % (seconds, milliseconds)) return datetime.datetime.fromtimestamp(timestamp) def get_stream_message(client, stream, message_id): "Fetch a stream message and format it as a (message_id, fields) pair" response = client.xrange(stream, min=message_id, max=message_id) assert len(response) == 1 return response[0] def get_main_cluster_node_data(command_result): """ Tries to find whatever node is running on port :7000 in the cluster resonse """ for node_id, node_data in command_result.items(): if '7000' in node_id: return node_data return None # 
RESPONSE CALLBACKS class TestResponseCallbacks(object): "Tests for the response callback system" def test_response_callbacks(self, r): all_response_callbacks = dict_merge( rediscluster.RedisCluster.RESPONSE_CALLBACKS, rediscluster.RedisCluster.CLUSTER_COMMANDS_RESPONSE_CALLBACKS, ) assert r.response_callbacks == all_response_callbacks assert id(r.response_callbacks) != id(all_response_callbacks) r.set_response_callback('GET', lambda x: 'static') r['a'] = 'foo' assert r['a'] == 'static' class TestRedisCommands(object): def test_command_on_invalid_key_type(self, r): r.lpush('a', '1') with pytest.raises(redis.ResponseError): r['a'] # SERVER INFORMATION def test_client_list(self, r): clients = r.client_list() client_data = get_main_cluster_node_data(clients)[0] assert isinstance(client_data, dict) assert 'addr' in client_data @skip_if_server_version_lt('5.0.0') def test_client_list_type(self, r): with pytest.raises(exceptions.RedisError): r.client_list(_type='not a client type') for client_type in ['normal', 'master', 'replica', 'pubsub']: clients = get_main_cluster_node_data(r.client_list(_type=client_type)) assert isinstance(clients, list) @skip_if_server_version_lt('5.0.0') def test_client_id(self, r): assert get_main_cluster_node_data(r.client_id()) > 0 @skip_if_server_version_lt('5.0.0') def test_client_unblock(self, r): myid = get_main_cluster_node_data(r.client_id()) assert not r.client_unblock(myid) assert not r.client_unblock(myid, error=True) assert not r.client_unblock(myid, error=False) @skip_if_server_version_lt('2.6.9') def test_client_getname(self, r): assert get_main_cluster_node_data(r.client_getname()) is None @skip_if_server_version_lt('2.6.9') @skip_for_no_cluster_impl() def test_client_setname(self, r): assert r.client_setname('redis_py_test') assert r.client_getname() == 'redis_py_test' @skip_if_server_version_lt('2.6.9') @skip_for_no_cluster_impl() def test_client_list_after_client_setname(self, r): r.client_setname('redis_py_test') clients = r.client_list() # we don't know which client ours will be assert 'redis_py_test' in [c['name'] for c in clients] @skip_if_server_version_lt('2.9.50') def test_client_pause(self, r): assert r.client_pause(1) assert r.client_pause(timeout=1) with pytest.raises(exceptions.RedisError): r.client_pause(timeout='not an integer') def test_config_get(self, r): data = get_main_cluster_node_data(r.config_get()) assert 'maxmemory' in data assert data['maxmemory'].isdigit() def test_config_resetstat(self, r): r.ping() prior_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) assert prior_commands_processed >= 1 r.config_resetstat() reset_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) assert reset_commands_processed < prior_commands_processed def test_config_set(self, r): data = get_main_cluster_node_data(r.config_get()) rdbname = data['dbfilename'] try: assert r.config_set('dbfilename', 'redis_py_test.rdb') assert get_main_cluster_node_data(r.config_get())['dbfilename'] == 'redis_py_test.rdb' finally: assert r.config_set('dbfilename', rdbname) def test_dbsize(self, r): r['a'] = 'foo' r['b'] = 'bar' # Count all commands sent to the DB. 
Since we have one slave # for every master we will look for 4 and not 2 dbsize_sum = sum([db_size_count for node_id, db_size_count in r.dbsize().items()]) assert dbsize_sum == 4 def test_echo(self, r): assert get_main_cluster_node_data(r.echo('foo bar')) == b'foo bar' def test_info(self, r): r['a'] = 'foo' r['b'] = 'bar' info = get_main_cluster_node_data(r.info()) assert isinstance(info, dict) # We only have a "db0" in cluster mode and only one of the commands will bind to node :7000 assert info['db0']['keys'] == 1 # Sum all keys in all slots keys_sum = sum([node_data.get('db0', {}).get('keys', 0) for node_id, node_data in r.info().items()]) assert keys_sum == 4 def test_lastsave(self, r): assert isinstance(get_main_cluster_node_data(r.lastsave()), datetime.datetime) def test_object(self, r): r['a'] = 'foo' assert isinstance(r.object('refcount', 'a'), int) assert isinstance(r.object('idletime', 'a'), int) assert r.object('encoding', 'a') in (b'raw', b'embstr') assert r.object('idletime', 'invalid-key') is None def test_ping(self, r): assert r.ping() @skip_for_no_cluster_impl() def test_slowlog_get(self, r, slowlog): assert r.slowlog_reset() unicode_string = unichr(3456) + 'abcd' + unichr(3421) r.get(unicode_string) slowlog = get_main_cluster_node_data(r.slowlog_get()) assert isinstance(slowlog, list) commands = [log['command'] for log in slowlog] get_command = b' '.join((b'GET', unicode_string.encode('utf-8'))) assert get_command in commands assert b'SLOWLOG RESET' in commands # the order should be ['GET ', 'SLOWLOG RESET'], # but if other clients are executing commands at the same time, there # could be commands, before, between, or after, so just check that # the two we care about are in the appropriate order. assert commands.index(get_command) < commands.index(b'SLOWLOG RESET') # make sure other attributes are typed correctly assert isinstance(slowlog[0]['start_time'], int) assert isinstance(slowlog[0]['duration'], int) @skip_for_no_cluster_impl() def test_slowlog_get_limit(self, r, slowlog): assert r.slowlog_reset() r.get('foo') r.get('bar') slowlog = r.slowlog_get(1) assert isinstance(slowlog, list) commands = [log['command'] for log in slowlog] assert b'GET foo' not in commands assert b'GET bar' in commands @skip_for_no_cluster_impl() def test_slowlog_length(self, r, slowlog): r.get('foo') assert isinstance(r.slowlog_len(), int) @skip_if_server_version_lt('2.6.0') def test_time(self, r): t = get_main_cluster_node_data(r.time()) assert len(t) == 2 assert isinstance(t[0], int) assert isinstance(t[1], int) # BASIC KEY COMMANDS def test_append(self, r): assert r.append('a', 'a1') == 2 assert r['a'] == b'a1' assert r.append('a', 'a2') == 4 assert r['a'] == b'a1a2' @skip_if_server_version_lt('2.6.0') def test_bitcount(self, r): r.setbit('a', 5, True) assert r.bitcount('a') == 1 r.setbit('a', 6, True) assert r.bitcount('a') == 2 r.setbit('a', 5, False) assert r.bitcount('a') == 1 r.setbit('a', 9, True) r.setbit('a', 17, True) r.setbit('a', 25, True) r.setbit('a', 33, True) assert r.bitcount('a') == 5 assert r.bitcount('a', 0, -1) == 5 assert r.bitcount('a', 2, 3) == 2 assert r.bitcount('a', 2, -1) == 3 assert r.bitcount('a', -2, -1) == 2 assert r.bitcount('a', 1, 1) == 1 def test_bitop_not_supported(self, r): """ Validate that the command is blocked in cluster mode and throws an Exception """ r['a'] = '' with pytest.raises(RedisClusterException): r.bitop('not', 'r', 'a') @skip_if_server_version_lt('2.6.0') @skip_for_no_cluster_impl() def test_bitop_not_empty_string(self, r): r['a'] = '' 
r.bitop('not', 'r', 'a') assert r.get('r') is None @skip_if_server_version_lt('2.6.0') @skip_for_no_cluster_impl() def test_bitop_not(self, r): test_str = b'\xAA\x00\xFF\x55' correct = ~0xAA00FF55 & 0xFFFFFFFF r['a'] = test_str r.bitop('not', 'r', 'a') assert int(binascii.hexlify(r['r']), 16) == correct @skip_if_server_version_lt('2.6.0') @skip_for_no_cluster_impl() def test_bitop_not_in_place(self, r): test_str = b'\xAA\x00\xFF\x55' correct = ~0xAA00FF55 & 0xFFFFFFFF r['a'] = test_str r.bitop('not', 'a', 'a') assert int(binascii.hexlify(r['a']), 16) == correct @skip_if_server_version_lt('2.6.0') @skip_for_no_cluster_impl() def test_bitop_single_string(self, r): test_str = b'\x01\x02\xFF' r['a'] = test_str r.bitop('and', 'res1', 'a') r.bitop('or', 'res2', 'a') r.bitop('xor', 'res3', 'a') assert r['res1'] == test_str assert r['res2'] == test_str assert r['res3'] == test_str @skip_if_server_version_lt('2.6.0') @skip_for_no_cluster_impl() def test_bitop_string_operands(self, r): r['a'] = b'\x01\x02\xFF\xFF' r['b'] = b'\x01\x02\xFF' r.bitop('and', 'res1', 'a', 'b') r.bitop('or', 'res2', 'a', 'b') r.bitop('xor', 'res3', 'a', 'b') assert int(binascii.hexlify(r['res1']), 16) == 0x0102FF00 assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF @skip_if_server_version_lt('2.8.7') def test_bitpos(self, r): key = 'key:bitpos' r.set(key, b'\xff\xf0\x00') assert r.bitpos(key, 0) == 12 assert r.bitpos(key, 0, 2, -1) == 16 assert r.bitpos(key, 0, -2, -1) == 12 r.set(key, b'\x00\xff\xf0') assert r.bitpos(key, 1, 0) == 8 assert r.bitpos(key, 1, 1) == 8 r.set(key, b'\x00\x00\x00') assert r.bitpos(key, 1) == -1 @skip_if_server_version_lt('2.8.7') def test_bitpos_wrong_arguments(self, r): key = 'key:bitpos:wrong:args' r.set(key, b'\xff\xf0\x00') with pytest.raises(exceptions.RedisError): r.bitpos(key, 0, end=1) == 12 with pytest.raises(exceptions.RedisError): r.bitpos(key, 7) == 12 def test_decr(self, r): assert r.decr('a') == -1 assert r['a'] == b'-1' assert r.decr('a') == -2 assert r['a'] == b'-2' assert r.decr('a', amount=5) == -7 assert r['a'] == b'-7' def test_decrby(self, r): assert r.decrby('a', amount=2) == -2 assert r.decrby('a', amount=3) == -5 assert r['a'] == b'-5' def test_delete(self, r): assert r.delete('a') == 0 r['a'] = 'foo' assert r.delete('a') == 1 def test_delete_with_multiple_keys(self, r): r['a'] = 'foo' r['b'] = 'bar' assert r.delete('a', 'b') == 2 assert r.get('a') is None assert r.get('b') is None def test_delitem(self, r): r['a'] = 'foo' del r['a'] assert r.get('a') is None @skip_if_server_version_lt('4.0.0') def test_unlink(self, r): assert r.unlink('a') == 0 r['a'] = 'foo' assert r.unlink('a') == 1 assert r.get('a') is None @skip_if_server_version_lt('4.0.0') @skip_for_no_cluster_impl() def test_unlink_with_multiple_keys(self, r): r['a'] = 'foo' r['b'] = 'bar' assert r.unlink('a', 'b') == 2 assert r.get('a') is None assert r.get('b') is None @skip_if_server_version_lt('2.6.0') def test_dump_and_restore(self, r): r['a'] = 'foo' dumped = r.dump('a') del r['a'] r.restore('a', 0, dumped) assert r['a'] == b'foo' @skip_if_server_version_lt('3.0.0') def test_dump_and_restore_and_replace(self, r): r['a'] = 'bar' dumped = r.dump('a') with pytest.raises(redis.ResponseError): r.restore('a', 0, dumped) r.restore('a', 0, dumped, replace=True) assert r['a'] == b'bar' def test_exists(self, r): assert r.exists('a') == 0 r['G0B96'] = 'foo' r['TEFX5'] = 'bar' assert r.exists('G0B96') == 1 assert r.exists('G0B96', 'TEFX5') == 2 def 
test_exists_contains(self, r): assert 'a' not in r r['a'] = 'foo' assert 'a' in r def test_expire(self, r): assert not r.expire('a', 10) r['a'] = 'foo' assert r.expire('a', 10) assert 0 < r.ttl('a') <= 10 assert r.persist('a') # the ttl command changes behavior in redis-2.8+ http://redis.io/commands/ttl assert r.ttl('a') == -1 def test_expireat_datetime(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) r['a'] = 'foo' assert r.expireat('a', expire_at) assert 0 < r.ttl('a') <= 61 def test_expireat_no_key(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) assert not r.expireat('a', expire_at) def test_expireat_unixtime(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) r['a'] = 'foo' expire_at_seconds = int(time.mktime(expire_at.timetuple())) assert r.expireat('a', expire_at_seconds) assert 0 < r.ttl('a') <= 61 def test_get_and_set(self, r): # get and set can't be tested independently of each other assert r.get('a') is None byte_string = b'value' integer = 5 unicode_string = unichr(3456) + 'abcd' + unichr(3421) assert r.set('byte_string', byte_string) assert r.set('integer', 5) assert r.set('unicode_string', unicode_string) assert r.get('byte_string') == byte_string assert r.get('integer') == str(integer).encode() assert r.get('unicode_string').decode('utf-8') == unicode_string def test_getitem_and_setitem(self, r): r['a'] = 'bar' assert r['a'] == b'bar' def test_getitem_raises_keyerror_for_missing_key(self, r): with pytest.raises(KeyError): r['a'] def test_getitem_does_not_raise_keyerror_for_empty_string(self, r): r['a'] = b"" assert r['a'] == b"" def test_get_set_bit(self, r): # no value assert not r.getbit('a', 5) # set bit 5 assert not r.setbit('a', 5, True) assert r.getbit('a', 5) # unset bit 4 assert not r.setbit('a', 4, False) assert not r.getbit('a', 4) # set bit 4 assert not r.setbit('a', 4, True) assert r.getbit('a', 4) # set bit 5 again assert r.setbit('a', 5, True) assert r.getbit('a', 5) def test_getrange(self, r): r['a'] = 'foo' assert r.getrange('a', 0, 0) == b'f' assert r.getrange('a', 0, 2) == b'foo' assert r.getrange('a', 3, 4) == b'' def test_getset(self, r): assert r.getset('a', 'foo') is None assert r.getset('a', 'bar') == b'foo' assert r.get('a') == b'bar' def test_incr(self, r): assert r.incr('a') == 1 assert r['a'] == b'1' assert r.incr('a') == 2 assert r['a'] == b'2' assert r.incr('a', amount=5) == 7 assert r['a'] == b'7' def test_incrby(self, r): assert r.incrby('a') == 1 assert r.incrby('a', 4) == 5 assert r['a'] == b'5' @skip_if_server_version_lt('2.6.0') def test_incrbyfloat(self, r): assert r.incrbyfloat('a') == 1.0 assert r['a'] == b'1' assert r.incrbyfloat('a', 1.1) == 2.1 assert float(r['a']) == float(2.1) def test_keys(self, r): assert r.keys() == [] keys_with_underscores = {b'test_a', b'test_b'} keys = keys_with_underscores.union({b'testc'}) for key in keys: r[key] = 1 assert set(r.keys(pattern='test_*')) == keys_with_underscores assert set(r.keys(pattern='test*')) == keys def test_mget(self, r): assert r.mget([]) == [] assert r.mget(['a', 'b']) == [None, None] r['a'] = '1' r['b'] = '2' r['c'] = '3' assert r.mget('a', 'other', 'b', 'c') == [b'1', None, b'2', b'3'] def test_mset(self, r): d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.mset(d) for k, v in iteritems(d): assert r[k] == v def test_msetnx(self, r): d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.msetnx(d) d2 = {'a': b'x', 'd': b'4'} assert not r.msetnx(d2) for k, v in iteritems(d): assert r[k] == v assert r.get('d') is None 
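    # Note: the multi-key tests above (mget, mset, msetnx, delete) only work against
    # a cluster because the client routes each key to the node owning its slot. The
    # slot of a key follows the same rule as the key_to_slot helper in the redis-trib
    # script earlier in this archive: CRC16/XMODEM of the key, or of the hash tag
    # between the first '{' and the following '}' when a non-empty tag is present,
    # modulo 16384. The sketch below is illustrative only; crc16_xmodem and
    # key_to_slot are local helper names, not part of the rediscluster API.

    def crc16_xmodem(data):
        # Bitwise CRC-16/XMODEM: poly 0x1021, init 0x0000, no reflection, no final xor.
        crc = 0
        for byte in bytearray(data):
            crc ^= byte << 8
            for _ in range(8):
                crc = ((crc << 1) ^ 0x1021) if crc & 0x8000 else (crc << 1)
                crc &= 0xFFFF
        return crc  # crc16_xmodem(b'123456789') == 0x31C3, matching the check value listed in the redis-trib comment

    def key_to_slot(key):
        # Hash only the non-empty hash tag between the first '{' and the next '}'.
        s = key.find('{')
        if s != -1:
            e = key.find('}', s + 1)
            if e not in (-1, s + 1):
                key = key[s + 1:e]
        return crc16_xmodem(key.encode()) % 16384

    # Keys that share a hash tag always land in the same slot:
    assert key_to_slot('{user1}.followers') == key_to_slot('{user1}.following')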
@skip_if_server_version_lt('2.6.0') def test_pexpire(self, r): assert not r.pexpire('a', 60000) r['a'] = 'foo' assert r.pexpire('a', 60000) assert 0 < r.pttl('a') <= 60000 assert r.persist('a') assert r.pttl('a') == -1 @skip_if_server_version_lt('2.6.0') def test_pexpireat_datetime(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) r['a'] = 'foo' assert r.pexpireat('a', expire_at) assert 0 < r.pttl('a') <= 61000 @skip_if_server_version_lt('2.6.0') def test_pexpireat_no_key(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) assert not r.pexpireat('a', expire_at) @skip_if_server_version_lt('2.6.0') def test_pexpireat_unixtime(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) r['a'] = 'foo' expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000 assert r.pexpireat('a', expire_at_seconds) assert 0 < r.pttl('a') <= 61000 @skip_if_server_version_lt('2.6.0') def test_psetex(self, r): assert r.psetex('a', 1000, 'value') assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 @skip_if_server_version_lt('2.6.0') def test_psetex_timedelta(self, r): expire_at = datetime.timedelta(milliseconds=1000) assert r.psetex('a', expire_at, 'value') assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 @skip_if_server_version_lt('2.6.0') def test_pttl(self, r): assert not r.pexpire('a', 10000) r['a'] = '1' assert r.pexpire('a', 10000) assert 0 < r.pttl('a') <= 10000 assert r.persist('a') assert r.pttl('a') == -1 @skip_if_server_version_lt('2.8.0') def test_pttl_no_key(self, r): "PTTL on servers 2.8 and after return -2 when the key doesn't exist" assert r.pttl('a') == -2 def test_randomkey(self, r): assert r.randomkey() is None for key in ('a', 'b', 'c'): r[key] = 1 assert r.randomkey() in (b'a', b'b', b'c') def test_rename(self, r): r['a'] = '1' assert r.rename('a', 'b') assert r.get('a') is None assert r['b'] == b'1' def test_renamenx(self, r): r['a'] = '1' r['b'] = '2' assert not r.renamenx('a', 'b') assert r['a'] == b'1' assert r['b'] == b'2' @skip_if_server_version_lt('2.6.0') def test_set_nx(self, r): assert r.set('a', '1', nx=True) assert not r.set('a', '2', nx=True) assert r['a'] == b'1' @skip_if_server_version_lt('2.6.0') def test_set_xx(self, r): assert not r.set('a', '1', xx=True) assert r.get('a') is None r['a'] = 'bar' assert r.set('a', '2', xx=True) assert r.get('a') == b'2' @skip_if_server_version_lt('2.6.0') def test_set_px(self, r): assert r.set('a', '1', px=10000) assert r['a'] == b'1' assert 0 < r.pttl('a') <= 10000 assert 0 < r.ttl('a') <= 10 @skip_if_server_version_lt('2.6.0') def test_set_px_timedelta(self, r): expire_at = datetime.timedelta(milliseconds=1000) assert r.set('a', '1', px=expire_at) assert 0 < r.pttl('a') <= 1000 assert 0 < r.ttl('a') <= 1 @skip_if_server_version_lt('2.6.0') def test_set_ex(self, r): assert r.set('a', '1', ex=10) assert 0 < r.ttl('a') <= 10 @skip_if_server_version_lt('2.6.0') def test_set_ex_timedelta(self, r): expire_at = datetime.timedelta(seconds=60) assert r.set('a', '1', ex=expire_at) assert 0 < r.ttl('a') <= 60 @skip_if_server_version_lt('2.6.0') def test_set_multipleoptions(self, r): r['a'] = 'val' assert r.set('a', '1', xx=True, px=10000) assert 0 < r.ttl('a') <= 10 def test_setex(self, r): assert r.setex('a', 60, '1') assert r['a'] == b'1' assert 0 < r.ttl('a') <= 60 def test_setnx(self, r): assert r.setnx('a', '1') assert r['a'] == b'1' assert not r.setnx('a', '2') assert r['a'] == b'1' def test_setrange(self, r): assert r.setrange('a', 5, 'foo') == 8 assert r['a'] 
== b'\0\0\0\0\0foo' r['a'] = 'abcdefghijh' assert r.setrange('a', 6, '12345') == 11 assert r['a'] == b'abcdef12345' def test_strlen(self, r): r['a'] = 'foo' assert r.strlen('a') == 3 def test_substr(self, r): r['a'] = '0123456789' assert r.substr('a', 0) == b'0123456789' assert r.substr('a', 2) == b'23456789' assert r.substr('a', 3, 5) == b'345' assert r.substr('a', 3, -2) == b'345678' def test_ttl(self, r): r['a'] = '1' assert r.expire('a', 10) assert 0 < r.ttl('a') <= 10 assert r.persist('a') assert r.ttl('a') == -1 @skip_if_server_version_lt('2.8.0') def test_ttl_nokey(self, r): "TTL on servers 2.8 and after return -2 when the key doesn't exist" assert r.ttl('a') == -2 def test_type(self, r): assert r.type('a') == b'none' r['a'] = '1' assert r.type('a') == b'string' del r['a'] r.lpush('a', '1') assert r.type('a') == b'list' del r['a'] r.sadd('a', '1') assert r.type('a') == b'set' del r['a'] r.zadd('a', {'1': 1}) assert r.type('a') == b'zset' # LIST COMMANDS def test_blpop(self, r): """ Generated keys for slot 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] """ r.rpush('0J8KD', '1', '2') r.rpush('822JO', '3', '4') assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') assert r.blpop(['822JO', '0J8KD'], timeout=1) is None r.rpush('c', '1') assert r.blpop('c', timeout=1) == (b'c', b'1') def test_brpop(self, r): """ Generated keys for slot 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] """ r.rpush('0J8KD', '1', '2') r.rpush('822JO', '3', '4') assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') assert r.brpop(['822JO', '0J8KD'], timeout=1) is None r.rpush('c', '1') assert r.brpop('c', timeout=1) == (b'c', b'1') def test_brpoplpush(self, r): r.rpush('a', '1', '2') r.rpush('b', '3', '4') assert r.brpoplpush('a', 'b') == b'2' assert r.brpoplpush('a', 'b') == b'1' assert r.brpoplpush('a', 'b', timeout=1) is None assert r.lrange('a', 0, -1) == [] assert r.lrange('b', 0, -1) == [b'1', b'2', b'3', b'4'] def test_brpoplpush_empty_string(self, r): r.rpush('a', '') assert r.brpoplpush('a', 'b') == b'' def test_lindex(self, r): r.rpush('a', '1', '2', '3') assert r.lindex('a', '0') == b'1' assert r.lindex('a', '1') == b'2' assert r.lindex('a', '2') == b'3' def test_linsert(self, r): r.rpush('a', '1', '2', '3') assert r.linsert('a', 'after', '2', '2.5') == 4 assert r.lrange('a', 0, -1) == [b'1', b'2', b'2.5', b'3'] assert r.linsert('a', 'before', '2', '1.5') == 5 assert r.lrange('a', 0, -1) == \ [b'1', b'1.5', b'2', b'2.5', b'3'] def test_llen(self, r): r.rpush('a', '1', '2', '3') assert r.llen('a') == 3 def test_lpop(self, r): r.rpush('a', '1', '2', '3') assert r.lpop('a') == b'1' assert r.lpop('a') == b'2' assert r.lpop('a') == b'3' assert r.lpop('a') is None def test_lpush(self, r): assert r.lpush('a', '1') == 1 assert r.lpush('a', '2') == 2 assert r.lpush('a', '3', '4') == 4 assert r.lrange('a', 0, -1) == [b'4', b'3', b'2', b'1'] def test_lpushx(self, r): assert r.lpushx('a', '1') == 0 assert r.lrange('a', 0, -1) == [] r.rpush('a', '1', '2', '3') assert r.lpushx('a', '4') == 4 assert r.lrange('a', 0, -1) == [b'4', b'1', b'2', b'3'] def 
test_lrange(self, r): r.rpush('a', '1', '2', '3', '4', '5') assert r.lrange('a', 0, 2) == [b'1', b'2', b'3'] assert r.lrange('a', 2, 10) == [b'3', b'4', b'5'] assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4', b'5'] def test_lrem(self, r): r.rpush('a', 'Z', 'b', 'Z', 'Z', 'c', 'Z', 'Z') # remove the first 'Z' item assert r.lrem('a', 1, 'Z') == 1 assert r.lrange('a', 0, -1) == [b'b', b'Z', b'Z', b'c', b'Z', b'Z'] # remove the last 2 'Z' items assert r.lrem('a', -2, 'Z') == 2 assert r.lrange('a', 0, -1) == [b'b', b'Z', b'Z', b'c'] # remove all 'Z' items assert r.lrem('a', 0, 'Z') == 2 assert r.lrange('a', 0, -1) == [b'b', b'c'] def test_lset(self, r): r.rpush('a', '1', '2', '3') assert r.lrange('a', 0, -1) == [b'1', b'2', b'3'] assert r.lset('a', 1, '4') assert r.lrange('a', 0, 2) == [b'1', b'4', b'3'] def test_ltrim(self, r): r.rpush('a', '1', '2', '3') assert r.ltrim('a', 0, 1) assert r.lrange('a', 0, -1) == [b'1', b'2'] def test_rpop(self, r): r.rpush('a', '1', '2', '3') assert r.rpop('a') == b'3' assert r.rpop('a') == b'2' assert r.rpop('a') == b'1' assert r.rpop('a') is None def test_rpoplpush(self, r): r.rpush('a', 'a1', 'a2', 'a3') r.rpush('b', 'b1', 'b2', 'b3') assert r.rpoplpush('a', 'b') == b'a3' assert r.lrange('a', 0, -1) == [b'a1', b'a2'] assert r.lrange('b', 0, -1) == [b'a3', b'b1', b'b2', b'b3'] def test_rpush(self, r): assert r.rpush('a', '1') == 1 assert r.rpush('a', '2') == 2 assert r.rpush('a', '3', '4') == 4 assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] def test_rpushx(self, r): assert r.rpushx('a', 'b') == 0 assert r.lrange('a', 0, -1) == [] r.rpush('a', '1', '2', '3') assert r.rpushx('a', '4') == 4 assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] # SCAN COMMANDS @skip_if_server_version_lt('2.8.0') def test_scan(self, r): """ Generated keys for slot 0 : ['GQ5KU', 'IFWJL', 'X582D'] """ r.set('GQ5KU', 1) r.set('IFWJL', 2) r.set('X582D', 3) cursor, keys = get_main_cluster_node_data(r.scan()) assert cursor == 0 assert set(keys) == {b'GQ5KU', b'IFWJL', b'X582D'} _, keys = get_main_cluster_node_data(r.scan(match='GQ5KU')) assert set(keys) == {b'GQ5KU'} @skip_if_server_version_lt('2.8.0') def test_scan_iter(self, r): r.set('a', 1) r.set('b', 2) r.set('c', 3) keys = list(r.scan_iter()) assert set(keys) == {b'a', b'b', b'c'} keys = list(r.scan_iter(match='a')) assert set(keys) == {b'a'} @skip_if_server_version_lt('2.8.0') def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') assert cursor == 0 assert set(members) == {b'1', b'2', b'3'} _, members = r.sscan('a', match=b'1') assert set(members) == {b'1'} @skip_if_server_version_lt('2.8.0') def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) assert set(members) == {b'1', b'2', b'3'} members = list(r.sscan_iter('a', match=b'1')) assert set(members) == {b'1'} @skip_if_server_version_lt('2.8.0') def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') assert cursor == 0 assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'} _, dic = r.hscan('a', match='a') assert dic == {b'a': b'1'} @skip_if_server_version_lt('2.8.0') def test_hscan_iter(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) dic = dict(r.hscan_iter('a')) assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'} dic = dict(r.hscan_iter('a', match='a')) assert dic == {b'a': b'1'} @skip_if_server_version_lt('2.8.0') def test_zscan(self, r): r.zadd('a', {'a': 1, 'b': 2, 'c': 3}) cursor, pairs = r.zscan('a') assert cursor == 0 assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)} 
_, pairs = r.zscan('a', match='a') assert set(pairs) == {(b'a', 1)} @skip_if_server_version_lt('2.8.0') def test_zscan_iter(self, r): r.zadd('a', {'a': 1, 'b': 2, 'c': 3}) pairs = list(r.zscan_iter('a')) assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)} pairs = list(r.zscan_iter('a', match='a')) assert set(pairs) == {(b'a', 1)} # SET COMMANDS def test_sadd(self, r): members = {b'1', b'2', b'3'} r.sadd('a', *members) assert r.smembers('a') == members def test_scard(self, r): r.sadd('a', '1', '2', '3') assert r.scard('a') == 3 def test_sdiff(self, r): r.sadd('a', '1', '2', '3') assert r.sdiff('a', 'b') == {b'1', b'2', b'3'} r.sadd('b', '2', '3') assert r.sdiff('a', 'b') == {b'1'} def test_sdiffstore(self, r): r.sadd('a', '1', '2', '3') assert r.sdiffstore('c', 'a', 'b') == 3 assert r.smembers('c') == {b'1', b'2', b'3'} r.sadd('b', '2', '3') assert r.sdiffstore('c', 'a', 'b') == 1 assert r.smembers('c') == {b'1'} def test_sinter(self, r): r.sadd('a', '1', '2', '3') assert r.sinter('a', 'b') == set() r.sadd('b', '2', '3') assert r.sinter('a', 'b') == {b'2', b'3'} def test_sinterstore(self, r): r.sadd('a', '1', '2', '3') assert r.sinterstore('c', 'a', 'b') == 0 assert r.smembers('c') == set() r.sadd('b', '2', '3') assert r.sinterstore('c', 'a', 'b') == 2 assert r.smembers('c') == {b'2', b'3'} def test_sismember(self, r): r.sadd('a', '1', '2', '3') assert r.sismember('a', '1') assert r.sismember('a', '2') assert r.sismember('a', '3') assert not r.sismember('a', '4') def test_smembers(self, r): r.sadd('a', '1', '2', '3') assert r.smembers('a') == {b'1', b'2', b'3'} def test_smove(self, r): r.sadd('a', 'a1', 'a2') r.sadd('b', 'b1', 'b2') assert r.smove('a', 'b', 'a1') assert r.smembers('a') == {b'a2'} assert r.smembers('b') == {b'b1', b'b2', b'a1'} def test_spop(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) value = r.spop('a') assert value in s assert r.smembers('a') == set(s) - {value} @skip_if_server_version_lt('3.2.0') def test_spop_multi_value(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) values = r.spop('a', 2) assert len(values) == 2 for value in values: assert value in s assert r.spop('a', 1) == list(set(s) - set(values)) def test_srandmember(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) assert r.srandmember('a') in s @skip_if_server_version_lt('2.6.0') def test_srandmember_multi_value(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) randoms = r.srandmember('a', number=2) assert len(randoms) == 2 assert set(randoms).intersection(s) == set(randoms) def test_srem(self, r): r.sadd('a', '1', '2', '3', '4') assert r.srem('a', '5') == 0 assert r.srem('a', '2', '4') == 2 assert r.smembers('a') == {b'1', b'3'} def test_sunion(self, r): r.sadd('a', '1', '2') r.sadd('b', '2', '3') assert r.sunion('a', 'b') == {b'1', b'2', b'3'} def test_sunionstore(self, r): r.sadd('a', '1', '2') r.sadd('b', '2', '3') assert r.sunionstore('c', 'a', 'b') == 3 assert r.smembers('c') == {b'1', b'2', b'3'} # SORTED SET COMMANDS def test_zadd(self, r): mapping = {'a1': 1.0, 'a2': 2.0, 'a3': 3.0} r.zadd('a', mapping) assert r.zrange('a', 0, -1, withscores=True) == \ [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] # error cases with pytest.raises(exceptions.DataError): r.zadd('a', {}) # cannot use both nx and xx options with pytest.raises(exceptions.DataError): r.zadd('a', mapping, nx=True, xx=True) # cannot use the incr options with more than one value with pytest.raises(exceptions.DataError): r.zadd('a', mapping, incr=True) def test_zadd_nx(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 
99, 'a2': 2}, nx=True) == 1 assert r.zrange('a', 0, -1, withscores=True) == \ [(b'a1', 1.0), (b'a2', 2.0)] def test_zadd_xx(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 99, 'a2': 2}, xx=True) == 0 assert r.zrange('a', 0, -1, withscores=True) == \ [(b'a1', 99.0)] def test_zadd_ch(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 99, 'a2': 2}, ch=True) == 2 assert r.zrange('a', 0, -1, withscores=True) == \ [(b'a2', 2.0), (b'a1', 99.0)] def test_zadd_incr(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5 @skip_for_no_cluster_impl() def test_zadd_incr_with_xx(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ # this asks zadd to incr 'a1' only if it exists, but it clearly # doesn't. Redis returns a null value in this case and so should # redis-py assert r.zadd('a', {'a1': 1}, xx=True, incr=True) is None def test_zcard(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcard('a') == 3 def test_zcount(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcount('a', '-inf', '+inf') == 3 assert r.zcount('a', 1, 2) == 2 assert r.zcount('a', '(' + str(1), 2) == 1 assert r.zcount('a', 1, '(' + str(2)) == 1 assert r.zcount('a', 10, 20) == 0 def test_zincrby(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zincrby('a', 1, 'a2') == 3.0 assert r.zincrby('a', 5, 'a3') == 8.0 assert r.zscore('a', 'a2') == 3.0 assert r.zscore('a', 'a3') == 8.0 @skip_if_server_version_lt('2.8.9') def test_zlexcount(self, r): r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zlexcount('a', '-', '+') == 7 assert r.zlexcount('a', '[b', '[f') == 5 def test_zinterstore_sum(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 2 assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1', 9)] def test_zinterstore_max(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 2 assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] def test_zinterstore_min(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('8I2EQ', {'a1': 2, 'a2': 3, 'a3': 5}) r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 2 assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] def test_zinterstore_with_weight(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 2 assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a3', 20), (b'a1', 23)] @skip_if_server_version_lt('4.9.0') def test_zpopmax(self, r): 
""" Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zpopmax('60ZE7') == [(b'a3', 3)] # with count assert r.zpopmax('60ZE7', count=2) == \ [(b'a2', 2), (b'a1', 1)] @skip_if_server_version_lt('4.9.0') def test_zpopmin(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zpopmin('60ZE7') == [(b'a1', 1)] # with count assert r.zpopmin('60ZE7', count=2) == \ [(b'a2', 2), (b'a3', 3)] @skip_if_server_version_lt('4.9.0') @skip_for_no_cluster_impl() def test_bzpopmax(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 2}) r.zadd('8I2EQ', {'b1': 10, 'b2': 20}) assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b2', 20) assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b1', 10) assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a2', 2) assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a1', 1) assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) is None r.zadd('R8H1V', {'c1': 100}) assert r.bzpopmax('R8H1V', timeout=1) == (b'c', b'c1', 100) @skip_if_server_version_lt('4.9.0') @skip_for_no_cluster_impl() def test_bzpopmin(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 2}) r.zadd('8I2EQ', {'b1': 10, 'b2': 20}) assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b1', 10) assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b2', 20) assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a1', 1) assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a2', 2) assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) is None r.zadd('R8H1V', {'c1': 100}) assert r.bzpopmin('R8H1V', timeout=1) == (b'c', b'c1', 100) def test_zrange(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrange('a', 0, 1) == [b'a1', b'a2'] assert r.zrange('a', 1, 2) == [b'a2', b'a3'] # withscores assert r.zrange('a', 0, 1, withscores=True) == \ [(b'a1', 1.0), (b'a2', 2.0)] assert r.zrange('a', 1, 2, withscores=True) == \ [(b'a2', 2.0), (b'a3', 3.0)] # custom score function assert r.zrange('a', 0, 1, withscores=True, score_cast_func=int) == \ [(b'a1', 1), (b'a2', 2)] @skip_if_server_version_lt('2.8.9') def test_zrangebylex(self, r): r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zrangebylex('a', '-', '[c') == [b'a', b'b', b'c'] assert r.zrangebylex('a', '-', '(c') == [b'a', b'b'] assert r.zrangebylex('a', '[aaa', '(g') == \ [b'b', b'c', b'd', b'e', b'f'] assert r.zrangebylex('a', '[f', '+') == [b'f', b'g'] assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b'd', b'e'] @skip_if_server_version_lt('2.9.9') def test_zrevrangebylex(self, r): r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] assert r.zrevrangebylex('a', '(g', '[aaa') == \ [b'f', b'e', b'd', b'c', b'b'] assert r.zrevrangebylex('a', '+', '[f') == [b'g', b'f'] assert r.zrevrangebylex('a', '+', '-', start=3, num=2) == \ [b'd', b'c'] def test_zrangebyscore(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrangebyscore('a', 2, 4) == [b'a2', b'a3', b'a4'] # slicing with start/num assert 
r.zrangebyscore('a', 2, 4, start=1, num=2) == \ [b'a3', b'a4'] # withscores assert r.zrangebyscore('a', 2, 4, withscores=True) == \ [(b'a2', 2.0), (b'a3', 3.0), (b'a4', 4.0)] # custom score function assert r.zrangebyscore('a', 2, 4, withscores=True, score_cast_func=int) == \ [(b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zrank(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrank('a', 'a1') == 0 assert r.zrank('a', 'a2') == 1 assert r.zrank('a', 'a6') is None def test_zrem(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrem('a', 'a2') == 1 assert r.zrange('a', 0, -1) == [b'a1', b'a3'] assert r.zrem('a', 'b') == 0 assert r.zrange('a', 0, -1) == [b'a1', b'a3'] def test_zrem_multiple_keys(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrem('a', 'a1', 'a2') == 2 assert r.zrange('a', 0, 5) == [b'a3'] @skip_if_server_version_lt('2.8.9') def test_zremrangebylex(self, r): r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zremrangebylex('a', '-', '[c') == 3 assert r.zrange('a', 0, -1) == [b'd', b'e', b'f', b'g'] assert r.zremrangebylex('a', '[f', '+') == 2 assert r.zrange('a', 0, -1) == [b'd', b'e'] assert r.zremrangebylex('a', '[h', '+') == 0 assert r.zrange('a', 0, -1) == [b'd', b'e'] def test_zremrangebyrank(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zremrangebyrank('a', 1, 3) == 3 assert r.zrange('a', 0, 5) == [b'a1', b'a5'] def test_zremrangebyscore(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zremrangebyscore('a', 2, 4) == 3 assert r.zrange('a', 0, -1) == [b'a1', b'a5'] assert r.zremrangebyscore('a', 2, 4) == 0 assert r.zrange('a', 0, -1) == [b'a1', b'a5'] def test_zrevrange(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrevrange('a', 0, 1) == [b'a3', b'a2'] assert r.zrevrange('a', 1, 2) == [b'a2', b'a1'] # withscores assert r.zrevrange('a', 0, 1, withscores=True) == \ [(b'a3', 3.0), (b'a2', 2.0)] assert r.zrevrange('a', 1, 2, withscores=True) == \ [(b'a2', 2.0), (b'a1', 1.0)] # custom score function assert r.zrevrange('a', 0, 1, withscores=True, score_cast_func=int) == \ [(b'a3', 3.0), (b'a2', 2.0)] def test_zrevrangebyscore(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrevrangebyscore('a', 4, 2) == [b'a4', b'a3', b'a2'] # slicing with start/num assert r.zrevrangebyscore('a', 4, 2, start=1, num=2) == \ [b'a3', b'a2'] # withscores assert r.zrevrangebyscore('a', 4, 2, withscores=True) == \ [(b'a4', 4.0), (b'a3', 3.0), (b'a2', 2.0)] # custom score function assert r.zrevrangebyscore('a', 4, 2, withscores=True, score_cast_func=int) == \ [(b'a4', 4), (b'a3', 3), (b'a2', 2)] def test_zrevrank(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrevrank('a', 'a1') == 4 assert r.zrevrank('a', 'a2') == 3 assert r.zrevrank('a', 'a6') is None def test_zscore(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zscore('a', 'a1') == 1.0 assert r.zscore('a', 'a2') == 2.0 assert r.zscore('a', 'a4') is None def test_zunionstore_sum(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 4 assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] def test_zunionstore_max(self, r): 
""" Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 4 assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] def test_zunionstore_min(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 4}) r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 4 assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zunionstore_with_weight(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 4 assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] # HYPERLOGLOG TESTS @skip_if_server_version_lt('2.8.9') def test_pfadd(self, r): members = {b'1', b'2', b'3'} assert r.pfadd('a', *members) == 1 assert r.pfadd('a', *members) == 0 assert r.pfcount('a') == len(members) @pytest.mark.xfail(reason="New pfcount in 2.10.5 currently breaks in cluster") @skip_if_server_version_lt('2.8.9') def test_pfcount(self, r): members = {b'1', b'2', b'3'} r.pfadd('a', *members) assert r.pfcount('a') == len(members) members_b = {b'2', b'3', b'4'} r.pfadd('b', *members_b) assert r.pfcount('b') == len(members_b) assert r.pfcount('a', 'b') == len(members_b.union(members)) @skip_if_server_version_lt('2.8.9') def test_pfmerge(self, r): mema = {b'1', b'2', b'3'} memb = {b'2', b'3', b'4'} memc = {b'5', b'6', b'7'} r.pfadd('a', *mema) r.pfadd('b', *memb) r.pfadd('c', *memc) r.pfmerge('d', 'c', 'a') assert r.pfcount('d') == 6 r.pfmerge('d', 'b') assert r.pfcount('d') == 7 # HASH COMMANDS def test_hget_and_hset(self, r): r.hmset('a', {'1': 1, '2': 2, '3': 3}) assert r.hget('a', '1') == b'1' assert r.hget('a', '2') == b'2' assert r.hget('a', '3') == b'3' # field was updated, redis returns 0 assert r.hset('a', '2', 5) == 0 assert r.hget('a', '2') == b'5' # field is new, redis returns 1 assert r.hset('a', '4', 4) == 1 assert r.hget('a', '4') == b'4' # key inside of hash that doesn't exist returns null value assert r.hget('a', 'b') is None def test_hdel(self, r): r.hmset('a', {'1': 1, '2': 2, '3': 3}) assert r.hdel('a', '2') == 1 assert r.hget('a', '2') is None assert r.hdel('a', '1', '3') == 2 assert r.hlen('a') == 0 def test_hexists(self, r): r.hmset('a', {'1': 1, '2': 2, '3': 3}) assert r.hexists('a', '1') assert not r.hexists('a', '4') def test_hgetall(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hmset('a', h) assert r.hgetall('a') == h def test_hincrby(self, r): assert r.hincrby('a', '1') == 1 assert r.hincrby('a', '1', amount=2) == 3 assert r.hincrby('a', '1', amount=-2) == 1 @skip_if_server_version_lt('2.6.0') def test_hincrbyfloat(self, r): assert r.hincrbyfloat('a', '1') == 1.0 assert r.hincrbyfloat('a', '1') == 2.0 assert r.hincrbyfloat('a', '1', 1.2) == 3.2 def test_hkeys(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': 
b'3'} r.hmset('a', h) local_keys = list(iterkeys(h)) remote_keys = r.hkeys('a') assert (sorted(local_keys) == sorted(remote_keys)) def test_hlen(self, r): r.hmset('a', {'1': 1, '2': 2, '3': 3}) assert r.hlen('a') == 3 def test_hmget(self, r): assert r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) assert r.hmget('a', 'a', 'b', 'c') == [b'1', b'2', b'3'] def test_hmset(self, r): h = {b'a': b'1', b'b': b'2', b'c': b'3'} assert r.hmset('a', h) assert r.hgetall('a') == h def test_hsetnx(self, r): # Initially set the hash field assert r.hsetnx('a', '1', 1) assert r.hget('a', '1') == b'1' assert not r.hsetnx('a', '1', 2) assert r.hget('a', '1') == b'1' def test_hvals(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hmset('a', h) local_vals = list(itervalues(h)) remote_vals = r.hvals('a') assert sorted(local_vals) == sorted(remote_vals) @skip_if_server_version_lt('3.2.0') def test_hstrlen(self, r): r.hmset('a', {'1': '22', '2': '333'}) assert r.hstrlen('a', '1') == 2 assert r.hstrlen('a', '2') == 3 # SORT def test_sort_basic(self, r): r.rpush('a', '3', '2', '1', '4') assert r.sort('a') == [b'1', b'2', b'3', b'4'] def test_sort_limited(self, r): r.rpush('a', '3', '2', '1', '4') assert r.sort('a', start=1, num=2) == [b'2', b'3'] @skip_for_no_cluster_impl() def test_sort_by(self, r): r['score:1'] = 8 r['score:2'] = 3 r['score:3'] = 5 r.rpush('a', '3', '2', '1') assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] @skip_for_no_cluster_impl() def test_sort_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] @skip_for_no_cluster_impl() def test_sort_get_multi(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', '#')) == \ [b'u1', b'1', b'u2', b'2', b'u3', b'3'] @skip_for_no_cluster_impl() def test_sort_get_groups_two(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', '#'), groups=True) == \ [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')] def test_sort_groups_string_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') with pytest.raises(exceptions.DataError): r.sort('a', get='user:*', groups=True) def test_sort_groups_just_one_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') with pytest.raises(exceptions.DataError): r.sort('a', get=['user:*'], groups=True) def test_sort_groups_no_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') with pytest.raises(exceptions.DataError): r.sort('a', groups=True) @skip_for_no_cluster_impl() def test_sort_groups_three_gets(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r['door:1'] = 'd1' r['door:2'] = 'd2' r['door:3'] = 'd3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == \ [ (b'u1', b'd1', b'1'), (b'u2', b'd2', b'2'), (b'u3', b'd3', b'3') ] def test_sort_desc(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', desc=True) == [b'3', b'2', b'1'] def test_sort_alpha(self, r): r.rpush('a', 'e', 'c', 'b', 'd', 'a') assert r.sort('a', alpha=True) == \ [b'a', b'b', b'c', b'd', b'e'] def test_sort_store(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r.rpush('60ZE7', '2', '3', '1') assert r.sort('60ZE7', store='8I2EQ') == 3 assert r.lrange('8I2EQ', 
0, -1) == [b'1', b'2', b'3'] @skip_for_no_cluster_impl() def test_sort_all_options(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] """ r['user:1:username'] = 'zeus' r['user:2:username'] = 'titan' r['user:3:username'] = 'hermes' r['user:4:username'] = 'hercules' r['user:5:username'] = 'apollo' r['user:6:username'] = 'athena' r['user:7:username'] = 'hades' r['user:8:username'] = 'dionysus' r['user:1:favorite_drink'] = 'yuengling' r['user:2:favorite_drink'] = 'rum' r['user:3:favorite_drink'] = 'vodka' r['user:4:favorite_drink'] = 'milk' r['user:5:favorite_drink'] = 'pinot noir' r['user:6:favorite_drink'] = 'water' r['user:7:favorite_drink'] = 'gin' r['user:8:favorite_drink'] = 'apple juice' r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4') num = r.sort('gods', start=2, num=4, by='user:*:username', get='user:*:favorite_drink', desc=True, alpha=True, store='sorted') assert num == 4 assert r.lrange('sorted', 0, 10) == \ [b'vodka', b'milk', b'gin', b'apple juice'] def test_sort_issue_924(self, r): # Tests for issue https://github.com/andymccurdy/redis-py/issues/924 r.execute_command('SADD', 'issue#924', 1) r.execute_command('SORT', 'issue#924') @skip_for_no_cluster_impl() def test_cluster_addslots(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('ADDSLOTS', 1) is True @skip_for_no_cluster_impl() def test_cluster_count_failure_reports(self, mock_cluster_resp_int): assert isinstance(mock_cluster_resp_int.cluster( 'COUNT-FAILURE-REPORTS', 'node'), int) @skip_for_no_cluster_impl() def test_cluster_countkeysinslot(self, mock_cluster_resp_int): assert isinstance(mock_cluster_resp_int.cluster( 'COUNTKEYSINSLOT', 2), int) @skip_for_no_cluster_impl() def test_cluster_delslots(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('DELSLOTS', 1) is True @skip_for_no_cluster_impl() def test_cluster_failover(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('FAILOVER', 1) is True @skip_for_no_cluster_impl() def test_cluster_forget(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('FORGET', 1) is True @skip_for_no_cluster_impl() def test_cluster_info(self, mock_cluster_resp_info): assert isinstance(mock_cluster_resp_info.cluster('info'), dict) @skip_for_no_cluster_impl() def test_cluster_keyslot(self, mock_cluster_resp_int): assert isinstance(mock_cluster_resp_int.cluster( 'keyslot', 'asdf'), int) @skip_for_no_cluster_impl() def test_cluster_meet(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('meet', 'ip', 'port', 1) is True @skip_for_no_cluster_impl() def test_cluster_nodes(self, mock_cluster_resp_nodes): assert isinstance(mock_cluster_resp_nodes.cluster('nodes'), dict) @skip_for_no_cluster_impl() def test_cluster_replicate(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('replicate', 'nodeid') is True @skip_for_no_cluster_impl() def test_cluster_reset(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('reset', 'hard') is True @skip_for_no_cluster_impl() def test_cluster_saveconfig(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('saveconfig') is True @skip_for_no_cluster_impl() def test_cluster_setslot(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster('setslot', 1, 'IMPORTING', 'nodeid') is True @skip_for_no_cluster_impl() def test_cluster_slaves(self, mock_cluster_resp_slaves): assert isinstance(mock_cluster_resp_slaves.cluster( 'slaves', 'nodeid'), dict) # GEO COMMANDS 
@skip_if_server_version_lt('3.2.0') def test_geoadd(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') assert r.geoadd('barcelona', *values) == 2 assert r.zcard('barcelona') == 2 @skip_if_server_version_lt('3.2.0') def test_geoadd_invalid_params(self, r): with pytest.raises(exceptions.RedisError): r.geoadd('barcelona', *(1, 2)) @skip_if_server_version_lt('3.2.0') def test_geodist(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') assert r.geoadd('barcelona', *values) == 2 assert r.geodist('barcelona', 'place1', 'place2') == 3067.4157 @skip_if_server_version_lt('3.2.0') def test_geodist_units(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) assert r.geodist('barcelona', 'place1', 'place2', 'km') == 3.0674 @skip_if_server_version_lt('3.2.0') def test_geodist_missing_one_member(self, r): values = (2.1909389952632, 41.433791470673, 'place1') r.geoadd('barcelona', *values) assert r.geodist('barcelona', 'place1', 'missing_member', 'km') is None @skip_if_server_version_lt('3.2.0') def test_geodist_invalid_units(self, r): with pytest.raises(exceptions.RedisError): assert r.geodist('x', 'y', 'z', 'inches') @skip_if_server_version_lt('3.2.0') def test_geohash(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) assert r.geohash('barcelona', 'place1', 'place2') ==\ ['sp3e9yg3kd0', 'sp3e9cbc3t0'] @skip_if_server_version_lt('3.2.0') def test_geopos(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) # redis uses 52 bits precision, hereby small errors may be introduced. assert r.geopos('barcelona', 'place1', 'place2') ==\ [(2.19093829393386841, 41.43379028184083523), (2.18737632036209106, 41.40634178640635099)] @skip_if_server_version_lt('4.0.0') def test_geopos_no_value(self, r): assert r.geopos('barcelona', 'place1', 'place2') == [None, None] @skip_if_server_version_lt('3.2.0') @skip_if_server_version_gte('4.0.0') def test_old_geopos_no_value(self, r): assert r.geopos('barcelona', 'place1', 'place2') == [] @skip_if_server_version_lt('3.2.0') def test_georadius(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) assert r.georadius('barcelona', 2.191, 41.433, 1000) == ['place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_no_values(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) assert r.georadius('barcelona', 1, 2, 1000) == [] @skip_if_server_version_lt('3.2.0') def test_georadius_units(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km') ==\ ['place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_with(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) # test a bunch of combinations to test the parse response # function. 
assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', withdist=True, withcoord=True, withhash=True) ==\ [['place1', 0.0881, 3471609698139488, (2.19093829393386841, 41.43379028184083523)]] assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', withdist=True, withcoord=True) ==\ [['place1', 0.0881, (2.19093829393386841, 41.43379028184083523)]] assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', withhash=True, withcoord=True) ==\ [['place1', 3471609698139488, (2.19093829393386841, 41.43379028184083523)]] # test no values. assert r.georadius('barcelona', 2, 1, 1, unit='km', withdist=True, withcoord=True, withhash=True) == [] @skip_if_server_version_lt('3.2.0') def test_georadius_count(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) assert r.georadius('barcelona', 2.191, 41.433, 3000, count=1) ==\ ['place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_sort(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='ASC') ==\ ['place1', 'place2'] assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='DESC') ==\ ['place2', 'place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_store(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ'] """ values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('60ZE7', *values) r.georadius('60ZE7', 2.191, 41.433, 1000, store='8I2EQ') assert r.zrange('8I2EQ', 0, -1) == [b'place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_store_dist(self, r): """ Generated keys for slot 0 : ['60ZE7', '8I2EQ'] """ values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('60ZE7', *values) r.georadius('60ZE7', 2.191, 41.433, 1000, store_dist='8I2EQ') # instead of saving the geo score, the distance is saved.
assert r.zscore('8I2EQ', 'place1') == 88.05060698409301 @skip_if_server_version_lt('3.2.0') def test_georadiusmember(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) assert r.georadiusbymember('barcelona', 'place1', 4000) ==\ ['place2', 'place1'] assert r.georadiusbymember('barcelona', 'place1', 10) == ['place1'] assert r.georadiusbymember('barcelona', 'place1', 4000, withdist=True, withcoord=True, withhash=True) ==\ [['place2', 3067.4157, 3471609625421029, (2.187376320362091, 41.40634178640635)], ['place1', 0.0, 3471609698139488, (2.1909382939338684, 41.433790281840835)]] @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xack(self, r): stream = 'stream' group = 'group' consumer = 'consumer' # xack on a stream that doesn't exist assert r.xack(stream, group, '0-0') == 0 m1 = r.xadd(stream, {'one': 'one'}) m2 = r.xadd(stream, {'two': 'two'}) m3 = r.xadd(stream, {'three': 'three'}) # xack on a group that doesn't exist assert r.xack(stream, group, m1) == 0 r.xgroup_create(stream, group, 0) r.xreadgroup(group, consumer, streams={stream: 0}) # xack returns the number of ack'd elements assert r.xack(stream, group, m1) == 1 assert r.xack(stream, group, m2, m3) == 2 @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xadd(self, r): stream = 'stream' message_id = r.xadd(stream, {'foo': 'bar'}) assert re.match(br'[0-9]+\-[0-9]+', message_id) # explicit message id message_id = b'9999999999999999999-0' assert message_id == r.xadd(stream, {'foo': 'bar'}, id=message_id) # with maxlen, the list evicts the first message r.xadd(stream, {'foo': 'bar'}, maxlen=2, approximate=False) assert r.xlen(stream) == 2 @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xclaim(self, r): stream = 'stream' group = 'group' consumer1 = 'consumer1' consumer2 = 'consumer2' message_id = r.xadd(stream, {'john': 'wick'}) message = get_stream_message(r, stream, message_id) r.xgroup_create(stream, group, 0) # trying to claim a message that isn't already pending doesn't # do anything response = r.xclaim(stream, group, consumer2, min_idle_time=0, message_ids=(message_id,)) assert response == [] # read the group as consumer1 to initially claim the messages r.xreadgroup(group, consumer1, streams={stream: 0}) # claim the message as consumer2 response = r.xclaim(stream, group, consumer2, min_idle_time=0, message_ids=(message_id,)) assert response[0] == message # reclaim the message as consumer1, but use the justid argument # which only returns message ids assert r.xclaim(stream, group, consumer1, min_idle_time=0, message_ids=(message_id,), justid=True) == [message_id] @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xdel(self, r): stream = 'stream' # deleting from an empty stream doesn't do anything assert r.xdel(stream, 1) == 0 m1 = r.xadd(stream, {'foo': 'bar'}) m2 = r.xadd(stream, {'foo': 'bar'}) m3 = r.xadd(stream, {'foo': 'bar'}) # xdel returns the number of deleted elements assert r.xdel(stream, m1) == 1 assert r.xdel(stream, m2, m3) == 2 @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xgroup_create(self, r): # tests xgroup_create and xinfo_groups stream = 'stream' group = 'group' r.xadd(stream, {'foo': 'bar'}) # no group is setup yet, no info to obtain assert r.xinfo_groups(stream) == [] assert r.xgroup_create(stream, group, 0) expected = [{ 'name': group.encode(), 'consumers': 0, 'pending': 0, 'last-delivered-id': b'0-0' }] 
assert r.xinfo_groups(stream) == expected @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xgroup_create_mkstream(self, r): # tests xgroup_create and xinfo_groups stream = 'stream' group = 'group' # an error is raised if a group is created on a stream that # doesn't already exist with pytest.raises(exceptions.ResponseError): r.xgroup_create(stream, group, 0) # however, with mkstream=True, the underlying stream is created # automatically assert r.xgroup_create(stream, group, 0, mkstream=True) expected = [{ 'name': group.encode(), 'consumers': 0, 'pending': 0, 'last-delivered-id': b'0-0' }] assert r.xinfo_groups(stream) == expected @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xgroup_delconsumer(self, r): stream = 'stream' group = 'group' consumer = 'consumer' r.xadd(stream, {'foo': 'bar'}) r.xadd(stream, {'foo': 'bar'}) r.xgroup_create(stream, group, 0) # a consumer that hasn't yet read any messages doesn't do anything assert r.xgroup_delconsumer(stream, group, consumer) == 0 # read all messages from the group r.xreadgroup(group, consumer, streams={stream: 0}) # deleting the consumer should return 2 pending messages assert r.xgroup_delconsumer(stream, group, consumer) == 2 @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xgroup_destroy(self, r): stream = 'stream' group = 'group' r.xadd(stream, {'foo': 'bar'}) # destroying a nonexistent group returns False assert not r.xgroup_destroy(stream, group) r.xgroup_create(stream, group, 0) assert r.xgroup_destroy(stream, group) @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xgroup_setid(self, r): stream = 'stream' group = 'group' message_id = r.xadd(stream, {'foo': 'bar'}) r.xgroup_create(stream, group, 0) # advance the last_delivered_id to the message_id r.xgroup_setid(stream, group, message_id) expected = [{ 'name': group.encode(), 'consumers': 0, 'pending': 0, 'last-delivered-id': message_id }] assert r.xinfo_groups(stream) == expected @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xinfo_consumers(self, r): stream = 'stream' group = 'group' consumer1 = 'consumer1' consumer2 = 'consumer2' r.xadd(stream, {'foo': 'bar'}) r.xgroup_create(stream, group, 0) r.xreadgroup(group, consumer1, streams={stream: 0}) r.xreadgroup(group, consumer2, streams={stream: 0}) info = r.xinfo_consumers(stream, group) assert len(info) == 2 expected = [ {'name': consumer1.encode(), 'pending': 1}, {'name': consumer2.encode(), 'pending': 0}, ] # we can't determine the idle time, so just make sure it's an int assert isinstance(info[0].pop('idle'), (int, long)) assert isinstance(info[1].pop('idle'), (int, long)) assert info == expected @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xinfo_stream(self, r): stream = 'stream' m1 = r.xadd(stream, {'foo': 'bar'}) m2 = r.xadd(stream, {'foo': 'bar'}) info = r.xinfo_stream(stream) assert info['length'] == 2 assert info['first-entry'] == get_stream_message(r, stream, m1) assert info['last-entry'] == get_stream_message(r, stream, m2) @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xlen(self, r): stream = 'stream' assert r.xlen(stream) == 0 r.xadd(stream, {'foo': 'bar'}) r.xadd(stream, {'foo': 'bar'}) assert r.xlen(stream) == 2 @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xpending(self, r): stream = 'stream' group = 'group' consumer1 = 'consumer1' consumer2 = 'consumer2' m1 = r.xadd(stream, {'foo': 'bar'}) m2 = r.xadd(stream, {'foo': 
'bar'}) r.xgroup_create(stream, group, 0) # xpending on a group that has no consumers yet expected = { 'pending': 0, 'min': None, 'max': None, 'consumers': [] } assert r.xpending(stream, group) == expected # read 1 message from the group with each consumer r.xreadgroup(group, consumer1, streams={stream: 0}, count=1) r.xreadgroup(group, consumer2, streams={stream: m1}, count=1) expected = { 'pending': 2, 'min': m1, 'max': m2, 'consumers': [ {'name': consumer1.encode(), 'pending': 1}, {'name': consumer2.encode(), 'pending': 1}, ] } assert r.xpending(stream, group) == expected @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xpending_range(self, r): stream = 'stream' group = 'group' consumer1 = 'consumer1' consumer2 = 'consumer2' m1 = r.xadd(stream, {'foo': 'bar'}) m2 = r.xadd(stream, {'foo': 'bar'}) r.xgroup_create(stream, group, 0) # xpending range on a group that has no consumers yet assert r.xpending_range(stream, group) == [] # read 1 message from the group with each consumer r.xreadgroup(group, consumer1, streams={stream: 0}, count=1) r.xreadgroup(group, consumer2, streams={stream: m1}, count=1) response = r.xpending_range(stream, group) assert len(response) == 2 assert response[0]['message_id'] == m1 assert response[0]['consumer'] == consumer1.encode() assert response[1]['message_id'] == m2 assert response[1]['consumer'] == consumer2.encode() @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xrange(self, r): stream = 'stream' m1 = r.xadd(stream, {'foo': 'bar'}) m2 = r.xadd(stream, {'foo': 'bar'}) m3 = r.xadd(stream, {'foo': 'bar'}) m4 = r.xadd(stream, {'foo': 'bar'}) def get_ids(results): return [result[0] for result in results] results = r.xrange(stream, min=m1) assert get_ids(results) == [m1, m2, m3, m4] results = r.xrange(stream, min=m2, max=m3) assert get_ids(results) == [m2, m3] results = r.xrange(stream, max=m3) assert get_ids(results) == [m1, m2, m3] results = r.xrange(stream, max=m2, count=1) assert get_ids(results) == [m1] @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xread(self, r): stream = 'stream' m1 = r.xadd(stream, {'foo': 'bar'}) m2 = r.xadd(stream, {'bing': 'baz'}) expected = [ [ stream, [ get_stream_message(r, stream, m1), get_stream_message(r, stream, m2), ] ] ] # xread starting at 0 returns both messages assert r.xread(streams={stream: 0}) == expected expected = [ [ stream, [ get_stream_message(r, stream, m1), ] ] ] # xread starting at 0 and count=1 returns only the first message assert r.xread(streams={stream: 0}, count=1) == expected expected = [ [ stream, [ get_stream_message(r, stream, m2), ] ] ] # xread starting at m1 returns only the second message assert r.xread(streams={stream: m1}) == expected # xread starting at the last message returns an empty list assert r.xread(streams={stream: m2}) == [] @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xreadgroup(self, r): stream = 'stream' group = 'group' consumer = 'consumer' m1 = r.xadd(stream, {'foo': 'bar'}) m2 = r.xadd(stream, {'bing': 'baz'}) r.xgroup_create(stream, group, 0) expected = [ [ stream, [ get_stream_message(r, stream, m1), get_stream_message(r, stream, m2), ] ] ] # xread starting at 0 returns both messages assert r.xreadgroup(group, consumer, streams={stream: 0}) == expected r.xgroup_destroy(stream, group) r.xgroup_create(stream, group, 0) expected = [ [ stream, [ get_stream_message(r, stream, m1), ] ] ] # xread starting at 0 and count=1 returns only the first message assert r.xreadgroup(group, 
consumer, streams={stream: 0}, count=1) == \ expected r.xgroup_destroy(stream, group) r.xgroup_create(stream, group, 0) expected = [ [ stream, [ get_stream_message(r, stream, m2), ] ] ] # xread starting at m1 returns only the second message assert r.xreadgroup(group, consumer, streams={stream: m1}) == expected r.xgroup_destroy(stream, group) r.xgroup_create(stream, group, 0) # xread starting at the last message returns an empty message list expected = [ [ stream, [] ] ] assert r.xreadgroup(group, consumer, streams={stream: m2}) == expected @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xrevrange(self, r): stream = 'stream' m1 = r.xadd(stream, {'foo': 'bar'}) m2 = r.xadd(stream, {'foo': 'bar'}) m3 = r.xadd(stream, {'foo': 'bar'}) m4 = r.xadd(stream, {'foo': 'bar'}) def get_ids(results): return [result[0] for result in results] results = r.xrevrange(stream, max=m4) assert get_ids(results) == [m4, m3, m2, m1] results = r.xrevrange(stream, max=m3, min=m2) assert get_ids(results) == [m3, m2] results = r.xrevrange(stream, min=m3) assert get_ids(results) == [m4, m3] results = r.xrevrange(stream, min=m2, count=1) assert get_ids(results) == [m4] @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xtrim(self, r): stream = 'stream' # trimming an empty key doesn't do anything assert r.xtrim(stream, 1000) == 0 r.xadd(stream, {'foo': 'bar'}) r.xadd(stream, {'foo': 'bar'}) r.xadd(stream, {'foo': 'bar'}) r.xadd(stream, {'foo': 'bar'}) # trimming an amount large than the number of messages # doesn't do anything assert r.xtrim(stream, 5, approximate=False) == 0 # 1 message is trimmed assert r.xtrim(stream, 3, approximate=False) == 1 @skip_if_server_version_lt('3.2.0') def test_bitfield_operations(self, r): # comments show affected bits bf = r.bitfield('a') resp = (bf .set('u8', 8, 255) # 00000000 11111111 .get('u8', 0) # 00000000 .get('u4', 8) # 1111 .get('u4', 12) # 1111 .get('u4', 13) # 111 0 .execute()) assert resp == [0, 0, 15, 15, 14] # .set() returns the previous value... 
resp = (bf .set('u8', 4, 1) # 0000 0001 .get('u16', 0) # 00000000 00011111 .set('u16', 0, 0) # 00000000 00000000 .execute()) assert resp == [15, 31, 31] # incrby adds to the value resp = (bf .incrby('u8', 8, 254) # 00000000 11111110 .incrby('u8', 8, 1) # 00000000 11111111 .get('u16', 0) # 00000000 11111111 .execute()) assert resp == [254, 255, 255] # Verify overflow protection works as a method: r.delete('a') resp = (bf .set('u8', 8, 254) # 00000000 11111110 .overflow('fail') .incrby('u8', 8, 2) # incrby 2 would overflow, None returned .incrby('u8', 8, 1) # 00000000 11111111 .incrby('u8', 8, 1) # incrby 1 would overflow, None returned .get('u16', 0) # 00000000 11111111 .execute()) assert resp == [0, None, 255, None, 255] # Verify overflow protection works as arg to incrby: r.delete('a') resp = (bf .set('u8', 8, 255) # 00000000 11111111 .incrby('u8', 8, 1) # 00000000 00000000 wrap default .set('u8', 8, 255) # 00000000 11111111 .incrby('u8', 8, 1, 'FAIL') # 00000000 11111111 fail .incrby('u8', 8, 1) # 00000000 11111111 still fail .get('u16', 0) # 00000000 11111111 .execute()) assert resp == [0, 0, 0, None, None, 255] # test default default_overflow r.delete('a') bf = r.bitfield('a', default_overflow='FAIL') resp = (bf .set('u8', 8, 255) # 00000000 11111111 .incrby('u8', 8, 1) # 00000000 11111111 fail default .get('u16', 0) # 00000000 11111111 .execute()) assert resp == [0, None, 255] @skip_if_server_version_lt('4.0.0') def test_memory_usage(self, r): r.set('foo', 'bar') assert isinstance(r.memory_usage('foo'), int) class TestRedisCommandsSort(object): # SORT def test_sort_basic(self, r): r.rpush('a', '3', '2', '1', '4') assert r.sort('a') == [b'1', b'2', b'3', b'4'] def test_sort_limited(self, r): r.rpush('a', '3', '2', '1', '4') assert r.sort('a', start=1, num=2) == [b'2', b'3'] @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_by(self, r): r['score:1'] = 8 r['score:2'] = 3 r['score:3'] = 5 r.rpush('a', '3', '2', '1') assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get_multi(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', '#')) == \ [b'u1', b'1', b'u2', b'2', b'u3', b'3'] @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get_groups_two(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', '#'), groups=True) == \ [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')] def test_sort_groups_string_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') with pytest.raises(DataError): r.sort('a', get='user:*', groups=True) def test_sort_groups_just_one_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') with pytest.raises(DataError): r.sort('a', get=['user:*'], groups=True) def test_sort_groups_no_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') with pytest.raises(DataError): r.sort('a', groups=True) @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_groups_three_gets(self, 
r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r['door:1'] = 'd1' r['door:2'] = 'd2' r['door:3'] = 'd3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == [ (b'u1', b'd1', b'1'), (b'u2', b'd2', b'2'), (b'u3', b'd3', b'3') ] def test_sort_desc(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', desc=True) == [b'3', b'2', b'1'] def test_sort_alpha(self, r): r.rpush('a', 'e', 'c', 'b', 'd', 'a') assert r.sort('a', alpha=True) == \ [b'a', b'b', b'c', b'd', b'e'] @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_store(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', store='sorted_values') == 3 assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3'] @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_all_options(self, r): r['user:1:username'] = 'zeus' r['user:2:username'] = 'titan' r['user:3:username'] = 'hermes' r['user:4:username'] = 'hercules' r['user:5:username'] = 'apollo' r['user:6:username'] = 'athena' r['user:7:username'] = 'hades' r['user:8:username'] = 'dionysus' r['user:1:favorite_drink'] = 'yuengling' r['user:2:favorite_drink'] = 'rum' r['user:3:favorite_drink'] = 'vodka' r['user:4:favorite_drink'] = 'milk' r['user:5:favorite_drink'] = 'pinot noir' r['user:6:favorite_drink'] = 'water' r['user:7:favorite_drink'] = 'gin' r['user:8:favorite_drink'] = 'apple juice' r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4') num = r.sort('gods', start=2, num=4, by='user:*:username', get='user:*:favorite_drink', desc=True, alpha=True, store='sorted') assert num == 4 assert r.lrange('sorted', 0, 10) == \ [b'vodka', b'milk', b'gin', b'apple juice'] class TestBinarySave(object): def test_binary_get_set(self, r): assert r.set(' foo bar ', '123') assert r.get(' foo bar ') == b'123' assert r.set(' foo\r\nbar\r\n ', '456') assert r.get(' foo\r\nbar\r\n ') == b'456' assert r.set(' \r\n\t\x07\x13 ', '789') assert r.get(' \r\n\t\x07\x13 ') == b'789' assert sorted(r.keys('*')) == \ [b' \r\n\t\x07\x13 ', b' foo\r\nbar\r\n ', b' foo bar '] assert r.delete(' foo bar ') assert r.delete(' foo\r\nbar\r\n ') assert r.delete(' \r\n\t\x07\x13 ') def test_binary_lists(self, r): mapping = { b'foo bar': [b'1', b'2', b'3'], b'foo\r\nbar\r\n': [b'4', b'5', b'6'], b'foo\tbar\x07': [b'7', b'8', b'9'], } # fill in lists for key, value in iteritems(mapping): r.rpush(key, *value) # check that KEYS returns all the keys as they are assert sorted(r.keys('*')) == sorted(list(iterkeys(mapping))) # check that it is possible to get list content by key name for key, value in iteritems(mapping): assert r.lrange(key, 0, -1) == value def test_22_info(self): """ Older Redis versions contained 'allocation_stats' in INFO that was the cause of a number of bugs when parsing. 
""" info = "allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330," \ "13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020," \ "20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303," \ "27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160," \ "34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523," \ "41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171," \ "49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332," \ "58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30," \ "67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25," \ "76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46," \ "85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20," \ "94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15," \ "103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52," \ "111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54," \ "119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52," \ "127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62," \ "135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7," \ "144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1," \ "155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2," \ "172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3," \ "187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1," \ "207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2," \ "220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1," \ ">=256=203" parsed = parse_info(info) assert 'allocation_stats' in parsed assert '6' in parsed['allocation_stats'] assert '>=256' in parsed['allocation_stats'] def test_large_responses(self, r): "The PythonParser has some special cases for return values > 1MB" # load up 100K of data into a key data = ''.join([ascii_letters] * (100000 // len(ascii_letters))) r['a'] = data assert r['a'] == data.encode() def test_floating_point_encoding(self, r): """ High precision floating point values sent to the server should keep precision. 
""" timestamp = 1349673917.939762 r.zadd('a', {'a1': timestamp}) assert r.zscore('a', 'a1') == timestamp redis-py-cluster-2.0.0/tests/test_node_manager.py000066400000000000000000000315471352661744600221460ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib from __future__ import with_statement # rediscluster imports from tests.conftest import skip_if_server_version_lt from rediscluster import RedisCluster from rediscluster.exceptions import RedisClusterException from rediscluster.nodemanager import NodeManager # 3rd party imports import pytest from mock import patch, Mock from redis import Redis from redis._compat import unicode from redis import ConnectionError, ResponseError pytestmark = skip_if_server_version_lt('2.9.0') def test_set_node_name(s): """ Test that method sets ["name"] correctly """ n = {"host": "127.0.0.1", "port": 7000} s.connection_pool.nodes.set_node_name(n) assert "name" in n assert n["name"] == "127.0.0.1:7000" def test_keyslot(): """ Test that method will compute correct key in all supported cases """ n = NodeManager([{}]) assert n.keyslot("foo") == 12182 assert n.keyslot("{foo}bar") == 12182 assert n.keyslot("{foo}") == 12182 assert n.keyslot(1337) == 4314 assert n.keyslot(125) == n.keyslot(b"125") assert n.keyslot(125) == n.keyslot("\x31\x32\x35") assert n.keyslot("大奖") == n.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96") assert n.keyslot(u"大奖") == n.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96") assert n.keyslot(1337.1234) == n.keyslot("1337.1234") assert n.keyslot(1337) == n.keyslot("1337") assert n.keyslot(b"abc") == n.keyslot("abc") assert n.keyslot("abc") == n.keyslot(unicode("abc")) assert n.keyslot(unicode("abc")) == n.keyslot(b"abc") def test_init_slots_cache_not_all_slots(s): """ Test that if not all slots are covered it should raise an exception """ # Create wrapper function so we can inject custom 'CLUSTER SLOTS' command result def get_redis_link_wrapper(*args, **kwargs): link = Redis(host="127.0.0.1", port=7000, decode_responses=True) orig_exec_method = link.execute_command def patch_execute_command(*args, **kwargs): if args == ('cluster', 'slots'): # Missing slot 5460 return [ [0, 5459, [b'127.0.0.1', 7000], [b'127.0.0.1', 7003]], [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.1', 7004]], [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.1', 7005]], ] return orig_exec_method(*args, **kwargs) # Missing slot 5460 link.execute_command = patch_execute_command return link s.connection_pool.nodes.get_redis_link = get_redis_link_wrapper with pytest.raises(RedisClusterException) as ex: s.connection_pool.nodes.initialize() assert unicode(ex.value).startswith("All slots are not covered after query all startup_nodes.") def test_init_slots_cache_not_all_slots_not_require_full_coverage(s): """ Test that if not all slots are covered it should raise an exception """ # Create wrapper function so we can inject custom 'CLUSTER SLOTS' command result def get_redis_link_wrapper(*args, **kwargs): link = Redis(host="127.0.0.1", port=7000, decode_responses=True) orig_exec_method = link.execute_command def patch_execute_command(*args, **kwargs): if args == ('cluster', 'slots'): # Missing slot 5460 return [ [0, 5459, [b'127.0.0.1', 7000], [b'127.0.0.1', 7003]], [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.1', 7004]], [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.1', 7005]], ] elif args == ('CONFIG GET', 'cluster-require-full-coverage'): return {'cluster-require-full-coverage': 'no'} else: return orig_exec_method(*args, **kwargs) # Missing slot 5460 link.execute_command = 
patch_execute_command

        return link

    s.connection_pool.nodes.get_redis_link = get_redis_link_wrapper

    s.connection_pool.nodes.initialize()

    assert 5460 not in s.connection_pool.nodes.slots


def test_init_slots_cache(s):
    """
    Test that slots cache can be initialized and all slots are covered
    """
    good_slots_resp = [
        [0, 5460, [b'127.0.0.1', 7000], [b'127.0.0.2', 7003]],
        [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.2', 7004]],
        [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.2', 7005]],
    ]

    with patch.object(Redis, 'execute_command') as execute_command_mock:
        def patch_execute_command(*args, **kwargs):
            if args == ('CONFIG GET', 'cluster-require-full-coverage'):
                return {'cluster-require-full-coverage': 'yes'}
            else:
                return good_slots_resp

        execute_command_mock.side_effect = patch_execute_command

        s.connection_pool.nodes.initialize()

        assert len(s.connection_pool.nodes.slots) == NodeManager.RedisClusterHashSlots

        for slot_info in good_slots_resp:
            all_hosts = [b'127.0.0.1', b'127.0.0.2']
            all_ports = [7000, 7001, 7002, 7003, 7004, 7005]
            slot_start = slot_info[0]
            slot_end = slot_info[1]

            for i in range(slot_start, slot_end + 1):
                assert len(s.connection_pool.nodes.slots[i]) == len(slot_info[2:])
                assert s.connection_pool.nodes.slots[i][0]['host'] in all_hosts
                assert s.connection_pool.nodes.slots[i][1]['host'] in all_hosts
                assert s.connection_pool.nodes.slots[i][0]['port'] in all_ports
                assert s.connection_pool.nodes.slots[i][1]['port'] in all_ports

        assert len(s.connection_pool.nodes.nodes) == 6


def test_empty_startup_nodes():
    """
    It should not be possible to create a node manager with no nodes specified
    """
    with pytest.raises(RedisClusterException):
        NodeManager()

    with pytest.raises(RedisClusterException):
        NodeManager([])


def test_wrong_startup_nodes_type():
    """
    If something other than a list type iterable is provided it should fail
    """
    with pytest.raises(RedisClusterException):
        NodeManager({})


def test_init_slots_cache_slots_collision():
    """
    Test that if 2 nodes do not agree on the same slots setup it should raise an error.
    In this test both nodes will say that the first slots block should be bound to
    different servers.
    """
    n = NodeManager(startup_nodes=[
        {"host": "127.0.0.1", "port": 7000},
        {"host": "127.0.0.1", "port": 7001},
    ])

    def monkey_link(host=None, port=None, *args, **kwargs):
        """
        Helper function to return custom slots cache data from different redis nodes
        """
        if port == 7000:
            result = [[0, 5460, [b'127.0.0.1', 7000], [b'127.0.0.1', 7003]],
                      [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.1', 7004]]]
        elif port == 7001:
            result = [[0, 5460, [b'127.0.0.1', 7001], [b'127.0.0.1', 7003]],
                      [5461, 10922, [b'127.0.0.1', 7000], [b'127.0.0.1', 7004]]]
        else:
            result = []

        r = RedisCluster(host=host, port=port, decode_responses=True)
        orig_execute_command = r.execute_command

        def execute_command(*args, **kwargs):
            if args == ("cluster", "slots"):
                return result
            elif args == ('CONFIG GET', 'cluster-require-full-coverage'):
                return {'cluster-require-full-coverage': 'yes'}
            else:
                return orig_execute_command(*args, **kwargs)

        r.execute_command = execute_command
        return r

    n.get_redis_link = monkey_link
    with pytest.raises(RedisClusterException) as ex:
        n.initialize()

    assert unicode(ex.value).startswith("startup_nodes could not agree on a valid slots cache."), unicode(ex.value)


def test_all_nodes():
    """
    Set a list of nodes and it should be possible to iterate over all
    """
    n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}])
    n.initialize()

    nodes = [node for node in n.nodes.values()]

    for i, node in enumerate(n.all_nodes()):
        assert node in nodes


def test_all_nodes_masters():
    """
    Set a list of nodes with random masters/slaves config and it should be possible
    to iterate over all of them.
    """
    n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}, {"host": "127.0.0.1", "port": 7001}])
    n.initialize()

    nodes = [node for node in n.nodes.values() if node['server_type'] == 'master']

    for node in n.all_masters():
        assert node in nodes


def test_random_startup_node():
    """
    Hard to test reliably for a random function
    """
    s = [{"1": 1}, {"2": 2}, {"3": 3}]
    n = NodeManager(startup_nodes=s)
    random_node = n.random_startup_node()

    for i in range(0, 5):
        assert random_node in s


def test_random_startup_node_ittr():
    """
    Hard to test reliably for a random function
    """
    s = [{"1": 1}, {"2": 2}, {"3": 3}]
    n = NodeManager(startup_nodes=s)

    for i, node in enumerate(n.random_startup_node_ittr()):
        if i == 5:
            break
        assert node in s


def test_cluster_slots_error():
    """
    Check that exception is raised if initialize can't execute 'CLUSTER SLOTS' command.
    """
    with patch.object(Redis, 'execute_command') as execute_command_mock:
        execute_command_mock.side_effect = Exception("foobar")

        n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}])

        with pytest.raises(RedisClusterException) as e:
            n.initialize()

        assert "ERROR sending 'cluster slots' command" in unicode(e)


def test_cluster_slots_error_expected_responseerror():
    """
    Check that exception is not raised if initialize can't execute 'CLUSTER SLOTS'
    command but can hit other nodes.
    """
    with patch.object(Redis, 'execute_command') as execute_command_mock:
        execute_command_mock.side_effect = ResponseError("MASTERDOWN")

        n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}])

        with pytest.raises(RedisClusterException) as e:
            n.initialize()

        assert 'Redis Cluster cannot be connected' in unicode(e)


def test_set_node():
    """
    Test to update data in a slot.
""" expected = { "host": "127.0.0.1", "name": "127.0.0.1:7000", "port": 7000, "server_type": "master", } n = NodeManager(startup_nodes=[{}]) assert len(n.slots) == 0, "no slots should exist" res = n.set_node(host="127.0.0.1", port=7000, server_type="master") assert res == expected assert n.nodes == {expected['name']: expected} def test_reset(): """ Test that reset method resets variables back to correct default values. """ n = NodeManager(startup_nodes=[{}]) n.initialize = Mock() n.reset() assert n.initialize.call_count == 1 def test_cluster_one_instance(): """ If the cluster exists of only 1 node then there is some hacks that must be validated they work. """ with patch.object(Redis, 'execute_command') as mock_execute_command: return_data = [[0, 16383, ['', 7006]]] def patch_execute_command(*args, **kwargs): if args == ('CONFIG GET', 'cluster-require-full-coverage'): return {'cluster-require-full-coverage': 'yes'} else: return return_data # mock_execute_command.return_value = return_data mock_execute_command.side_effect = patch_execute_command n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7006}]) n.initialize() assert n.nodes == {"127.0.0.1:7006": { 'host': '127.0.0.1', 'name': '127.0.0.1:7006', 'port': 7006, 'server_type': 'master', }} assert len(n.slots) == 16384 for i in range(0, 16384): assert n.slots[i] == [{ "host": "127.0.0.1", "name": "127.0.0.1:7006", "port": 7006, "server_type": "master", }] def test_initialize_follow_cluster(): n = NodeManager(nodemanager_follow_cluster=True, startup_nodes=[{'host': '127.0.0.1', 'port': 7000}]) n.orig_startup_nodes = None n.initialize() def test_init_with_down_node(): """ If I can't connect to one of the nodes, everything should still work. But if I can't connect to any of the nodes, exception should be thrown. """ def get_redis_link(host, port, decode_responses=False): if port == 7000: raise ConnectionError('mock connection error for 7000') return Redis(host=host, port=port, decode_responses=decode_responses) with patch.object(NodeManager, 'get_redis_link', side_effect=get_redis_link): n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) with pytest.raises(RedisClusterException) as e: n.initialize() assert 'Redis Cluster cannot be connected' in unicode(e.value) redis-py-cluster-2.0.0/tests/test_pipeline.py000066400000000000000000000524461352661744600213350ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib from __future__ import unicode_literals import re # rediscluster imports from rediscluster.client import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import RedisClusterException from tests.conftest import _get_client, skip_if_server_version_lt # 3rd party imports import pytest from mock import patch from redis._compat import unichr, unicode from redis.exceptions import WatchError, ResponseError, ConnectionError class TestPipeline(object): """ """ def test_pipeline(self, r): with r.pipeline() as pipe: (pipe.set('a', 'a1') .get('a') .zadd('z', {'z1': 1}) .zadd('z', {'z2': 4}) .zincrby('z', 1, 'z1') .zrange('z', 0, 5, withscores=True)) assert pipe.execute() == [ True, b'a1', True, True, 2.0, [(b'z1', 2.0), (b'z2', 4)], ] def test_pipeline_length(self, r): with r.pipeline() as pipe: # Initially empty. assert len(pipe) == 0 assert not pipe # Fill 'er up! pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') assert len(pipe) == 3 assert pipe # Execute calls reset(), so empty once again. 
pipe.execute() assert len(pipe) == 0 assert not pipe def test_pipeline_no_transaction(self, r): with r.pipeline(transaction=False) as pipe: pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') assert pipe.execute() == [True, True, True] assert r['a'] == b'a1' assert r['b'] == b'b1' assert r['c'] == b'c1' def test_pipeline_eval(self, r): with r.pipeline(transaction=False) as pipe: pipe.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") res = pipe.execute()[0] assert res[0] == b'A{foo}' assert res[1] == b'B{foo}' assert res[2] == b'first' assert res[3] == b'second' @pytest.mark.xfail(reason="unsupported command: watch") def test_pipeline_no_transaction_watch(self, r): r['a'] = 0 with r.pipeline(transaction=False) as pipe: pipe.watch('a') a = pipe.get('a') pipe.multi() pipe.set('a', int(a) + 1) assert pipe.execute() == [True] @pytest.mark.xfail(reason="unsupported command: watch") def test_pipeline_no_transaction_watch_failure(self, r): r['a'] = 0 with r.pipeline(transaction=False) as pipe: pipe.watch('a') a = pipe.get('a') r['a'] = 'bad' pipe.multi() pipe.set('a', int(a) + 1) with pytest.raises(WatchError): pipe.execute() assert r['a'] == b'bad' def test_exec_error_in_response(self, r): """ an invalid pipeline command at exec time adds the exception instance to the list of returned values """ r['c'] = 'a' with r.pipeline() as pipe: pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) result = pipe.execute(raise_on_error=False) assert result[0] assert r['a'] == b'1' assert result[1] assert r['b'] == b'2' # we can't lpush to a key that's a string value, so this should # be a ResponseError exception assert isinstance(result[2], ResponseError) assert r['c'] == b'a' # since this isn't a transaction, the other commands after the # error are still executed assert result[3] assert r['d'] == b'4' # make sure the pipe was restored to a working state assert pipe.set('z', 'zzz').execute() == [True] assert r['z'] == b'zzz' def test_exec_error_raised(self, r): r['c'] = 'a' with r.pipeline() as pipe: pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) with pytest.raises(ResponseError) as ex: pipe.execute() assert unicode(ex.value).startswith('Command # 3 (LPUSH c 3) of ' 'pipeline caused error: ') # make sure the pipe was restored to a working state assert pipe.set('z', 'zzz').execute() == [True] assert r['z'] == b'zzz' def test_transaction_with_empty_error_command(self, r): """ Commands with custom EMPTY_ERROR functionality return their default values in the pipeline no matter the raise_on_error preference """ for error_switch in (True, False): with r.pipeline() as pipe: pipe.set('a', 1).get("").set('c', 3) result = pipe.execute(raise_on_error=error_switch) assert result[0] assert result[1] == None assert result[2] def test_pipeline_with_empty_error_command(self, r): """ Commands with custom EMPTY_ERROR functionality return their default values in the pipeline no matter the raise_on_error preference """ for error_switch in (True, False): with r.pipeline(transaction=False) as pipe: pipe.set('a', 1).get("").set('c', 3) result = pipe.execute(raise_on_error=error_switch) assert result[0] assert result[1] == None assert result[2] def test_parse_error_raised(self, r): with r.pipeline() as pipe: # the zrem is invalid because we don't pass any keys to it pipe.set('a', 1).zrem('b').set('b', 2) with pytest.raises(ResponseError) as ex: pipe.execute() assert unicode(ex.value).startswith('Command # 2 (ZREM b) of ' 'pipeline caused error: ') # make sure the pipe was restored to 
a working state assert pipe.set('z', 'zzz').execute() == [True] assert r['z'] == b'zzz' @pytest.mark.xfail(reason="unsupported command: watch") def test_watch_succeed(self, r): r['a'] = 1 r['b'] = 2 with r.pipeline() as pipe: pipe.watch('a', 'b') assert pipe.watching a_value = pipe.get('a') b_value = pipe.get('b') assert a_value == b'1' assert b_value == b'2' pipe.multi() pipe.set('c', 3) assert pipe.execute() == [True] assert not pipe.watching @pytest.mark.xfail(reason="unsupported command: watch") def test_watch_failure(self, r): r['a'] = 1 r['b'] = 2 with r.pipeline() as pipe: pipe.watch('a', 'b') r['b'] = 3 pipe.multi() pipe.get('a') with pytest.raises(WatchError): pipe.execute() assert not pipe.watching @pytest.mark.xfail(reason="unsupported command: watch") def test_unwatch(self, r): r['a'] = 1 r['b'] = 2 with r.pipeline() as pipe: pipe.watch('a', 'b') r['b'] = 3 pipe.unwatch() assert not pipe.watching pipe.get('a') assert pipe.execute() == [b'1'] @pytest.mark.xfail(reason="unsupported command: watch") def test_transaction_callable(self, r): r['a'] = 1 r['b'] = 2 has_run = [] def my_transaction(pipe): a_value = pipe.get('a') assert a_value in (b'1', b'2') b_value = pipe.get('b') assert b_value == b'2' # silly run-once code... incr's "a" so WatchError should be raised # forcing this all to run again. this should incr "a" once to "2" if not has_run: r.incr('a') has_run.append('it has') pipe.multi() pipe.set('c', int(a_value) + int(b_value)) result = r.transaction(my_transaction, 'a', 'b') assert result == [True] assert r['c'] == b'4' def test_exec_error_in_no_transaction_pipeline(self, r): r['a'] = 1 with r.pipeline(transaction=False) as pipe: pipe.llen('a') pipe.expire('a', 100) with pytest.raises(ResponseError) as ex: pipe.execute() assert unicode(ex.value).startswith('Command # 1 (LLEN a) of ' 'pipeline caused error: ') assert r['a'] == b'1' def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): key = unichr(3456) + u'abcd' + unichr(3421) r[key] = 1 with r.pipeline(transaction=False) as pipe: pipe.llen(key) pipe.expire(key, 100) with pytest.raises(ResponseError) as ex: pipe.execute() expected = unicode('Command # 1 (LLEN {0}) of pipeline caused error: ').format(key) assert unicode(ex.value).startswith(expected) assert r[key] == b'1' @skip_if_server_version_lt('3.2.0') def test_pipeline_with_bitfield(self, r): with r.pipeline() as pipe: pipe.set('a', '1') bf = pipe.bitfield('b') pipe2 = (bf .set('u8', 8, 255) .get('u8', 0) .get('u4', 8) # 1111 .get('u4', 12) # 1111 .get('u4', 13) # 1110 .execute()) pipe.get('a') response = pipe.execute() assert pipe == pipe2 assert response == [True, [0, 0, 15, 15, 14], b'1'] def test_blocked_methods(self, r): """ Currently some method calls on a Cluster pipeline is blocked when using in cluster mode. They maybe implemented in the future. """ pipe = r.pipeline(transaction=False) with pytest.raises(RedisClusterException): pipe.multi() with pytest.raises(RedisClusterException): pipe.immediate_execute_command() with pytest.raises(RedisClusterException): pipe._execute_transaction(None, None, None) with pytest.raises(RedisClusterException): pipe.load_scripts() with pytest.raises(RedisClusterException): pipe.watch() with pytest.raises(RedisClusterException): pipe.unwatch() with pytest.raises(RedisClusterException): pipe.script_load_for_pipeline(None) with pytest.raises(RedisClusterException): pipe.transaction(None) def test_blocked_arguments(self, r): """ Currently some arguments is blocked when using in cluster mode. 
They maybe implemented in the future. """ with pytest.raises(RedisClusterException) as ex: r.pipeline(transaction=True) assert unicode(ex.value).startswith("transaction is deprecated in cluster mode"), True with pytest.raises(RedisClusterException) as ex: r.pipeline(shard_hint=True) assert unicode(ex.value).startswith("shard_hint is deprecated in cluster mode"), True def test_redis_cluster_pipeline(self): """ Test that we can use a pipeline with the RedisCluster class """ r = _get_client(RedisCluster) with r.pipeline(transaction=False) as pipe: pipe.get("foobar") def test_mget_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.mget(['a']) def test_mset_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.mset({'a': 1, 'b': 2}) def test_rename_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.rename('a', 'b') def test_renamenx_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.renamenx('a', 'b') def test_delete_single(self, r): r['a'] = 1 with r.pipeline(transaction=False) as pipe: pipe.delete('a') assert pipe.execute(), True def test_multi_delete_unsupported(self, r): with r.pipeline(transaction=False) as pipe: r['a'] = 1 r['b'] = 2 with pytest.raises(RedisClusterException): pipe.delete('a', 'b') def test_brpoplpush_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.brpoplpush() def test_rpoplpush_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.rpoplpush() def test_sort_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.sort() def test_sdiff_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.sdiff() def test_sdiffstore_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.sdiffstore() def test_sinter_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.sinter() def test_sinterstore_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.sinterstore() def test_smove_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.smove() def test_sunion_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.sunion() def test_sunionstore_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.sunionstore() def test_spfmerge_disabled(self, r): with r.pipeline(transaction=False) as pipe: with pytest.raises(RedisClusterException): pipe.pfmerge() def test_multi_key_operation_with_shared_shards(self, r): pipe = r.pipeline(transaction=False) pipe.set('a{foo}', 1) pipe.set('b{foo}', 2) pipe.set('c{foo}', 3) pipe.set('bar', 4) pipe.set('bazz', 5) pipe.get('a{foo}') pipe.get('b{foo}') pipe.get('c{foo}') pipe.get('bar') pipe.get('bazz') res = pipe.execute() assert res == [True, True, True, True, True, b'1', b'2', b'3', b'4', b'5'] @pytest.mark.xfail(reson="perform_execute_pipeline is not used any longer") def test_connection_error(self, r): test = self test._calls = [] def perform_execute_pipeline(pipe): if 
not test._calls:
                e = ConnectionError('test')
                test._calls.append({'exception': e})
                return [e]

            result = pipe.execute(raise_on_error=False)
            test._calls.append({'result': result})
            return result

        pipe = r.pipeline(transaction=False)
        orig_perform_execute_pipeline = pipe.perform_execute_pipeline
        pipe.perform_execute_pipeline = perform_execute_pipeline

        try:
            pipe.set('foo', 1)
            res = pipe.execute()

            assert res, [True]
            assert isinstance(test._calls[0]['exception'], ConnectionError)

            if len(test._calls) == 2:
                assert test._calls[1] == {'result': [True]}
            else:
                assert isinstance(test._calls[1]['result'][0], ResponseError)
                assert test._calls[2] == {'result': [True]}
        finally:
            pipe.perform_execute_pipeline = orig_perform_execute_pipeline
            del test._calls

    @pytest.mark.xfail(reason="perform_execute_pipeline is not used any longer")
    def test_asking_error(self, r):
        test = self
        test._calls = []

        def perform_execute_pipeline(pipe):
            if not test._calls:
                e = ResponseError("ASK {0} 127.0.0.1:7003".format(r.keyslot('foo')))
                test._calls.append({'exception': e})
                return [e, e]

            result = pipe.execute(raise_on_error=False)
            test._calls.append({'result': result})
            return result

        pipe = r.pipeline(transaction=False)
        orig_perform_execute_pipeline = pipe.perform_execute_pipeline
        pipe.perform_execute_pipeline = perform_execute_pipeline

        try:
            pipe.set('foo', 1)
            pipe.get('foo')
            res = pipe.execute()

            assert res == [True, b'1']
            assert isinstance(test._calls[0]['exception'], ResponseError)
            assert re.match("ASK", str(test._calls[0]['exception']))
            assert isinstance(test._calls[1]['result'][0], ResponseError)
            assert re.match("MOVED", str(test._calls[1]['result'][0]))
            assert test._calls[2] == {'result': [True, b'1']}
        finally:
            pipe.perform_execute_pipeline = orig_perform_execute_pipeline
            del test._calls

    def test_empty_stack(self, r):
        """
        If pipeline is executed with no commands it should return an empty list.
        """
        p = r.pipeline()
        result = p.execute()
        assert result == []


class TestReadOnlyPipeline(object):

    def test_pipeline_readonly(self, r, ro):
        """
        In readonly mode, only read related commands are supported.
        """
        r.set('foo71', 'a1')  # we assume this key is set on 127.0.0.1:7001
        r.zadd('foo88', {'z1': 1})  # we assume this key is set on 127.0.0.1:7002
        r.zadd('foo88', {'z2': 4})

        with ro.pipeline() as readonly_pipe:
            readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True)
            assert readonly_pipe.execute() == [
                b'a1',
                [(b'z1', 1.0), (b'z2', 4)],
            ]

    def assert_moved_redirection_on_slave(self, connection_pool_cls, cluster_obj):
        with patch.object(connection_pool_cls, 'get_node_by_slot') as return_slave_mock:
            with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock:
                def get_mock_node(role, port):
                    return {
                        'name': '127.0.0.1:{0}'.format(port),
                        'host': '127.0.0.1',
                        'port': port,
                        'server_type': role,
                    }

                return_slave_mock.return_value = get_mock_node('slave', 7005)
                return_master_mock.return_value = get_mock_node('slave', 7001)

                with cluster_obj.pipeline() as pipe:
                    # we assume this key is set on 127.0.0.1:7001(7004)
                    pipe.get('foo87').get('foo88').execute() == [None, None]

    def test_moved_redirection_on_slave_with_default(self):
        """
        On Pipeline, we redirected once and finally get from master with
        readonly client when data is completely moved.
        """
        self.assert_moved_redirection_on_slave(
            ClusterConnectionPool,
            RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1)
        )

    def test_moved_redirection_on_slave_with_readonly_mode_client(self):
        """
        Ditto with READONLY mode.
""" self.assert_moved_redirection_on_slave( ClusterReadOnlyConnectionPool, RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) ) def test_access_correct_slave_with_readonly_mode_client(self, sr): """ Test that the client can get value normally with readonly mode when we connect to correct slave. """ # we assume this key is set on 127.0.0.1:7001 sr.set('foo87', 'foo') sr.set('foo88', 'bar') import time time.sleep(1) with patch.object(ClusterReadOnlyConnectionPool, 'get_node_by_slot') as return_slave_mock: return_slave_mock.return_value = { 'name': '127.0.0.1:7004', 'host': '127.0.0.1', 'port': 7004, 'server_type': 'slave', } master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7001', 'port': 7001, 'server_type': 'master'} with patch.object( ClusterConnectionPool, 'get_master_node_by_slot', return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) with readonly_client.pipeline() as readonly_pipe: assert readonly_pipe.get('foo88').get('foo87').execute() == [b'bar', b'foo'] redis-py-cluster-2.0.0/tests/test_pubsub.py000066400000000000000000000473721352661744600210320ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib from __future__ import unicode_literals import threading import time # rediscluster imports from rediscluster.client import RedisCluster # 3rd party imports import pytest # import redis from redis import Redis from redis.exceptions import ConnectionError from redis._compat import basestring, unichr from .conftest import _get_client from .conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt def wait_for_message(pubsub, timeout=0.5, ignore_subscribe_messages=False): now = time.time() timeout = now + timeout while now < timeout: message = pubsub.get_message( ignore_subscribe_messages=ignore_subscribe_messages) if message is not None: return message time.sleep(0.01) now = time.time() return None def make_message(type, channel, data, pattern=None): return { 'type': type, 'pattern': pattern and pattern.encode('utf-8') or None, 'channel': channel and channel.encode('utf-8') or None, 'data': data.encode('utf-8') if isinstance(data, basestring) else data } def make_subscribe_test_data(pubsub, type): if type == 'channel': return { 'p': pubsub, 'sub_type': 'subscribe', 'unsub_type': 'unsubscribe', 'sub_func': pubsub.subscribe, 'unsub_func': pubsub.unsubscribe, 'keys': ['foo', 'bar', 'uni' + unichr(4456) + 'code'] } elif type == 'pattern': return { 'p': pubsub, 'sub_type': 'psubscribe', 'unsub_type': 'punsubscribe', 'sub_func': pubsub.psubscribe, 'unsub_func': pubsub.punsubscribe, 'keys': ['f*', 'b*', 'uni' + unichr(4456) + '*'] } assert False, 'invalid subscribe type: {0}'.format(type) class TestPubSubSubscribeUnsubscribe(object): def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): for key in keys: assert sub_func(key) is None # should be a message for each channel/pattern we just subscribed to for i, key in enumerate(keys): assert wait_for_message(p) == make_message(sub_type, key, i + 1) for key in keys: assert unsub_func(key) is None # should be a message for each channel/pattern we just unsubscribed # from for i, key in enumerate(keys): i = len(keys) - 1 - i assert wait_for_message(p) == make_message(unsub_type, key, i) def test_channel_subscribe_unsubscribe(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_subscribe_unsubscribe(**kwargs) def 
test_pattern_subscribe_unsubscribe(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_subscribe_unsubscribe(**kwargs) def _test_resubscribe_on_reconnection(self, p, sub_type, sub_func, keys, *args, **kwargs): for key in keys: assert sub_func(key) is None # should be a message for each channel/pattern we just subscribed to for i, key in enumerate(keys): assert wait_for_message(p) == make_message(sub_type, key, i + 1) # manually disconnect p.connection.disconnect() # calling get_message again reconnects and resubscribes # note, we may not re-subscribe to channels in exactly the same order # so we have to do some extra checks to make sure we got them all messages = [] for i, _ in enumerate(keys): messages.append(wait_for_message(p)) unique_channels = set() assert len(messages) == len(keys) for i, message in enumerate(messages): assert message['type'] == sub_type assert message['data'] == i + 1 assert isinstance(message['channel'], bytes) channel = message['channel'].decode('utf-8') unique_channels.add(channel) assert len(unique_channels) == len(keys) for channel in unique_channels: assert channel in keys def test_resubscribe_to_channels_on_reconnection(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_resubscribe_on_reconnection(**kwargs) def test_resubscribe_to_patterns_on_reconnection(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_resubscribe_on_reconnection(**kwargs) def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): assert p.subscribed is False sub_func(keys[0]) # we're now subscribed even though we haven't processed the # reply from the server just yet assert p.subscribed is True assert wait_for_message(p) == make_message(sub_type, keys[0], 1) # we're still subscribed assert p.subscribed is True # unsubscribe from all channels unsub_func() # we're still technically subscribed until we process the # response messages from the server assert p.subscribed is True assert wait_for_message(p) == make_message(unsub_type, keys[0], 0) # now we're no longer subscribed as no more messages can be delivered # to any channels we were listening to assert p.subscribed is False # subscribing again flips the flag back sub_func(keys[0]) assert p.subscribed is True assert wait_for_message(p) == make_message(sub_type, keys[0], 1) # unsubscribe again unsub_func() assert p.subscribed is True # subscribe to another channel before reading the unsubscribe response sub_func(keys[1]) assert p.subscribed is True # read the unsubscribe for key1 assert wait_for_message(p) == make_message(unsub_type, keys[0], 0) # we're still subscribed to key2, so subscribed should still be True assert p.subscribed is True # read the key2 subscribe message assert wait_for_message(p) == make_message(sub_type, keys[1], 1) unsub_func() # haven't read the message yet, so we're still subscribed assert p.subscribed is True assert wait_for_message(p) == make_message(unsub_type, keys[1], 0) # now we're finally unsubscribed assert p.subscribed is False def test_subscribe_property_with_channels(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_subscribed_property(**kwargs) def test_subscribe_property_with_patterns(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_subscribed_property(**kwargs) def test_ignore_all_subscribe_messages(self, r): p = r.pubsub(ignore_subscribe_messages=True) checks = ( (p.subscribe, 'foo'), (p.unsubscribe, 'foo'), # (p.psubscribe, 'f*'), # 
(p.punsubscribe, 'f*'), ) assert p.subscribed is False for func, channel in checks: assert func(channel) is None assert p.subscribed is True assert wait_for_message(p) is None assert p.subscribed is False def test_ignore_individual_subscribe_messages(self, r): p = r.pubsub() checks = ( (p.subscribe, 'foo'), (p.unsubscribe, 'foo'), # (p.psubscribe, 'f*'), # (p.punsubscribe, 'f*'), ) assert p.subscribed is False for func, channel in checks: assert func(channel) is None assert p.subscribed is True message = wait_for_message(p, ignore_subscribe_messages=True) assert message is None assert p.subscribed is False class TestPubSubMessages(object): """ Bug: Currently in cluster mode publish command will behave different then in standard/non cluster mode. See (docs/Pubsub.md) for details. Currently Redis instances will be used to test pubsub because they are easier to work with. """ def get_strict_redis_node(self, port, host="127.0.0.1"): return Redis(port=port, host=host) def setup_method(self, *args): self.message = None def message_handler(self, message): self.message = message def test_published_message_to_channel(self): node = self.get_strict_redis_node(7000) p = node.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') assert node.publish('foo', 'test message') == 1 message = wait_for_message(p) assert isinstance(message, dict) assert message == make_message('message', 'foo', 'test message') # Cleanup pubsub connections p.close() @pytest.mark.xfail(reason="This test is buggy and fails randomly") def test_publish_message_to_channel_other_server(self): """ Test that pubsub still works across the cluster on different nodes """ node_subscriber = self.get_strict_redis_node(7000) p = node_subscriber.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') node_sender = self.get_strict_redis_node(7001) # This should return 0 because of no connected clients to this server. 
assert node_sender.publish('foo', 'test message') == 0 message = wait_for_message(p) assert isinstance(message, dict) assert message == make_message('message', 'foo', 'test message') # Cleanup pubsub connections p.close() @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_published_message_to_pattern(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') p.psubscribe('f*') # 1 to pattern, 1 to channel assert r.publish('foo', 'test message') == 2 message1 = wait_for_message(p) message2 = wait_for_message(p) assert isinstance(message1, dict) assert isinstance(message2, dict) expected = [ make_message('message', 'foo', 'test message'), make_message('pmessage', 'foo', 'test message', pattern='f*') ] assert message1 in expected assert message2 in expected assert message1 != message2 def test_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(foo=self.message_handler) assert r.publish('foo', 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('message', 'foo', 'test message') @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{'f*': self.message_handler}) assert r.publish('foo', 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('pmessage', 'foo', 'test message', pattern='f*') @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_unicode_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) channel = 'uni' + unichr(4456) + 'code' channels = {channel: self.message_handler} print(channels) p.subscribe(**channels) assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('message', channel, 'test message') @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_unicode_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) pattern = 'uni' + unichr(4456) + '*' channel = 'uni' + unichr(4456) + 'code' p.psubscribe(**{pattern: self.message_handler}) assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('pmessage', channel, 'test message', pattern=pattern) class TestPubSubAutoDecoding(object): "These tests only validate that we get unicode values back" channel = 'uni' + unichr(4456) + 'code' pattern = 'uni' + unichr(4456) + '*' data = 'abc' + unichr(4458) + '123' def make_message(self, type, channel, data, pattern=None): return { 'type': type, 'channel': channel, 'pattern': pattern, 'data': data } def setup_method(self, *args): self.message = None def message_handler(self, message): self.message = message def test_channel_subscribe_unsubscribe(self, o): p = o.pubsub() p.subscribe(self.channel) assert wait_for_message(p) == self.make_message('subscribe', self.channel, 1) p.unsubscribe(self.channel) assert wait_for_message(p) == self.make_message('unsubscribe', self.channel, 0) @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_pattern_subscribe_unsubscribe(self, o): p = o.pubsub() p.psubscribe(self.pattern) assert wait_for_message(p) == self.make_message('psubscribe', self.pattern, 1) p.punsubscribe(self.pattern) assert wait_for_message(p) == self.make_message('punsubscribe', self.pattern, 0) def test_channel_publish(self, o): p = o.pubsub(ignore_subscribe_messages=True) 
p.subscribe(self.channel) o.publish(self.channel, self.data) assert wait_for_message(p) == self.make_message('message', self.channel, self.data) @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_pattern_publish(self, o): p = o.pubsub(ignore_subscribe_messages=True) p.psubscribe(self.pattern) o.publish(self.channel, self.data) assert wait_for_message(p) == self.make_message('pmessage', self.channel, self.data, pattern=self.pattern) def test_channel_message_handler(self, o): p = o.pubsub(ignore_subscribe_messages=True) p.subscribe(**{self.channel: self.message_handler}) o.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message('message', self.channel, self.data) # test that we reconnected to the correct channel p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + 'new data' o.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('message', self.channel, new_data) @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_pattern_message_handler(self, o): p = o.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{self.pattern: self.message_handler}) o.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, self.data, pattern=self.pattern) # test that we reconnected to the correct pattern p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + 'new data' o.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, new_data, pattern=self.pattern) class TestPubSubRedisDown(object): def test_channel_subscribe(self, r): r = Redis(host='localhost', port=6390) p = r.pubsub() with pytest.raises(ConnectionError): p.subscribe('foo') def test_pubsub_thread_publish(): """ This test will never fail but it will still show and be viable to use and to test the threading capability of the connectionpool and the publish mechanism. 
""" startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] r = RedisCluster( startup_nodes=startup_nodes, decode_responses=True, max_connections=16, max_connections_per_node=16, ) def t_run(rc): for i in range(0, 50): rc.publish('foo', 'bar') rc.publish('bar', 'foo') rc.publish('asd', 'dsa') rc.publish('dsa', 'asd') rc.publish('qwe', 'bar') rc.publish('ewq', 'foo') rc.publish('wer', 'dsa') rc.publish('rew', 'asd') # Use this for debugging # print(rc.connection_pool._available_connections) # print(rc.connection_pool._in_use_connections) # print(rc.connection_pool._created_connections) try: threads = [] for i in range(10): t = threading.Thread(target=t_run, args=(r,)) threads.append(t) t.start() except Exception: print("Error: unable to start thread") class TestPubSubPubSubSubcommands(object): @skip_if_server_version_lt('2.8.0') @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_channels(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo', 'bar', 'baz', 'quux') channels = sorted(r.pubsub_channels()) assert channels == [b'bar', b'baz', b'foo', b'quux'] @skip_if_server_version_lt('2.8.0') @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numsub(self, r): p1 = r.pubsub(ignore_subscribe_messages=True) p1.subscribe('foo', 'bar', 'baz') p2 = r.pubsub(ignore_subscribe_messages=True) p2.subscribe('bar', 'baz') p3 = r.pubsub(ignore_subscribe_messages=True) p3.subscribe('baz') channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)] assert channels == r.pubsub_numsub('foo', 'bar', 'baz') @skip_if_server_version_lt('2.8.0') @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numpat(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe('*oo', '*ar', 'b*z') assert r.pubsub_numpat() == 3 class TestPubSubPings(object): @skip_if_server_version_lt('3.0.0') @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_send_pubsub_ping(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') p.ping() assert wait_for_message(p) == make_message(type='pong', channel=None, data='', pattern=None) @skip_if_server_version_lt('3.0.0') @pytest.mark.xfail(reason="Pattern pubsub is not fully supported in cluster mode") def test_send_pubsub_ping_message(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') p.ping(message='hello world') assert wait_for_message(p) == make_message(type='pong', channel=None, data='hello world', pattern=None) redis-py-cluster-2.0.0/tests/test_scripting.py000066400000000000000000000123771352661744600215310ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib from __future__ import unicode_literals # rediscluster imports from rediscluster.exceptions import RedisClusterException # 3rd party imports from redis import exceptions import pytest multiply_script = """ local value = redis.call('GET', KEYS[1]) value = tonumber(value) return value * ARGV[1]""" msgpack_hello_script = """ local message = cmsgpack.unpack(ARGV[1]) local name = message['name'] return "hello " .. name """ msgpack_hello_script_broken = """ local message = cmsgpack.unpack(ARGV[1]) local names = message['name'] return "hello " .. 
name """ class TestScripting(object): @pytest.fixture(autouse=True) def reset_scripts(self, r): r.script_flush() def test_eval(self, r): r.set('a', 2) # 2 * 3 == 6 assert r.eval(multiply_script, 1, 'a', 3) == 6 def test_eval_same_slot(self, r): r.set('A{foo}', 2) r.set('B{foo}', 4) # 2 * 4 == 8 script = """ local value = redis.call('GET', KEYS[1]) local value2 = redis.call('GET', KEYS[2]) return value * value2 """ result = r.eval(script, 2, 'A{foo}', 'B{foo}') assert result == 8 def test_eval_crossslot(self, r): """ This test assumes that {foo} and {bar} will not go to the same server when used. In 3 masters + 3 slaves config this should pass. """ r.set('A{foo}', 2) r.set('B{bar}', 4) # 2 * 4 == 8 script = """ local value = redis.call('GET', KEYS[1]) local value2 = redis.call('GET', KEYS[2]) return value * value2 """ with pytest.raises(RedisClusterException): r.eval(script, 2, 'A{foo}', 'B{bar}') def test_evalsha(self, r): r.set('a', 2) sha = r.script_load(multiply_script) # 2 * 3 == 6 assert r.evalsha(sha, 1, 'a', 3) == 6 def test_evalsha_script_not_loaded(self, r): r.set('a', 2) sha = r.script_load(multiply_script) # remove the script from Redis's cache r.script_flush() with pytest.raises(exceptions.NoScriptError): r.evalsha(sha, 1, 'a', 3) def test_script_loading(self, r): # get the sha, then clear the cache sha = r.script_load(multiply_script) r.script_flush() assert r.script_exists(sha) == [False] r.script_load(multiply_script) assert r.script_exists(sha) == [True] def test_script_object(self, r): r.set('a', 2) multiply = r.register_script(multiply_script) precalculated_sha = multiply.sha assert precalculated_sha assert r.script_exists(multiply.sha) == [False] # Test second evalsha block (after NoScriptError) assert multiply(keys=['a'], args=[3]) == 6 # At this point, the script should be loaded assert r.script_exists(multiply.sha) == [True] # Test that the precalculated sha matches the one from redis assert multiply.sha == precalculated_sha # Test first evalsha block assert multiply(keys=['a'], args=[3]) == 6 @pytest.mark.xfail(reason="Script object not supported in cluster") def test_script_object_in_pipeline(self, r): multiply = r.register_script(multiply_script) precalculated_sha = multiply.sha assert precalculated_sha pipe = r.pipeline() pipe.set('a', 2) pipe.get('a') multiply(keys=['a'], args=[3], client=pipe) assert r.script_exists(multiply.sha) == [False] # [SET worked, GET 'a', result of multiple script] assert pipe.execute() == [True, b'2', 6] # The script should have been loaded by pipe.execute() assert r.script_exists(multiply.sha) == [True] # The precalculated sha should have been the correct one assert multiply.sha == precalculated_sha # purge the script from redis's cache and re-run the pipeline # the multiply script should be reloaded by pipe.execute() r.script_flush() pipe = r.pipeline() pipe.set('a', 2) pipe.get('a') multiply(keys=['a'], args=[3], client=pipe) assert r.script_exists(multiply.sha) == [False] # [SET worked, GET 'a', result of multiple script] assert pipe.execute() == [True, b'2', 6] assert r.script_exists(multiply.sha) == [True] @pytest.mark.xfail(reason="LUA is not supported in cluster") def test_eval_msgpack_pipeline_error_in_lua(self, r): msgpack_hello = r.register_script(msgpack_hello_script) assert msgpack_hello.sha pipe = r.pipeline() # avoiding a dependency to msgpack, this is the output of # msgpack.dumps({"name": "joe"}) msgpack_message_1 = b'\x81\xa4name\xa3Joe' msgpack_hello(args=[msgpack_message_1], client=pipe) assert 
r.script_exists(msgpack_hello.sha) == [False] assert pipe.execute()[0] == b'hello Joe' assert r.script_exists(msgpack_hello.sha) == [True] msgpack_hello_broken = r.register_script(msgpack_hello_script_broken) msgpack_hello_broken(args=[msgpack_message_1], client=pipe) with pytest.raises(exceptions.ResponseError) as excinfo: pipe.execute() assert excinfo.type == exceptions.ResponseError redis-py-cluster-2.0.0/tests/test_utils.py000066400000000000000000000121651352661744600206620ustar00rootroot00000000000000# -*- coding: utf-8 -*- # python std lib from __future__ import with_statement # rediscluster imports from rediscluster.exceptions import ( RedisClusterException, ClusterDownError ) from rediscluster.utils import ( string_keys_to_dict, dict_merge, blocked_command, merge_result, first_key, clusterdown_wrapper, parse_cluster_slots, ) # 3rd party imports import pytest from redis._compat import unicode def test_parse_cluster_slots(): """ Example raw output from redis cluster. Output is form a redis 3.2.x node that includes the id in the reponse. The test below that do not include the id is to validate that the code is compatible with redis versions that do not contain that value in the response from the server. 127.0.0.1:10000> cluster slots 1) 1) (integer) 5461 2) (integer) 10922 3) 1) "10.0.0.1" 2) (integer) 10000 3) "3588b4cf9fc72d57bb262a024747797ead0cf7ea" 4) 1) "10.0.0.4" 2) (integer) 10000 3) "a72c02c7d85f4ec3145ab2c411eefc0812aa96b0" 2) 1) (integer) 10923 2) (integer) 16383 3) 1) "10.0.0.2" 2) (integer) 10000 3) "ffd36d8d7cb10d813f81f9662a835f6beea72677" 4) 1) "10.0.0.5" 2) (integer) 10000 3) "5c15b69186017ddc25ebfac81e74694fc0c1a160" 3) 1) (integer) 0 2) (integer) 5460 3) 1) "10.0.0.3" 2) (integer) 10000 3) "069cda388c7c41c62abe892d9e0a2d55fbf5ffd5" 4) 1) "10.0.0.6" 2) (integer) 10000 3) "dc152a08b4cf1f2a0baf775fb86ad0938cb907dc" """ mock_response = [ [0, 5460, ['172.17.0.2', 7000], ['172.17.0.2', 7003]], [5461, 10922, ['172.17.0.2', 7001], ['172.17.0.2', 7004]], [10923, 16383, ['172.17.0.2', 7002], ['172.17.0.2', 7005]] ] parse_cluster_slots(mock_response) extended_mock_response = [ [0, 5460, ['172.17.0.2', 7000, 'ffd36d8d7cb10d813f81f9662a835f6beea72677'], ['172.17.0.2', 7003, '5c15b69186017ddc25ebfac81e74694fc0c1a160']], [5461, 10922, ['172.17.0.2', 7001, '069cda388c7c41c62abe892d9e0a2d55fbf5ffd5'], ['172.17.0.2', 7004, 'dc152a08b4cf1f2a0baf775fb86ad0938cb907dc']], [10923, 16383, ['172.17.0.2', 7002, '3588b4cf9fc72d57bb262a024747797ead0cf7ea'], ['172.17.0.2', 7005, 'a72c02c7d85f4ec3145ab2c411eefc0812aa96b0']] ] parse_cluster_slots(extended_mock_response) mock_binary_response = [ [0, 5460, [b'172.17.0.2', 7000], [b'172.17.0.2', 7003]], [5461, 10922, [b'172.17.0.2', 7001], [b'172.17.0.2', 7004]], [10923, 16383, [b'172.17.0.2', 7002], [b'172.17.0.2', 7005]] ] parse_cluster_slots(mock_binary_response) extended_mock_binary_response = [ [0, 5460, [b'172.17.0.2', 7000, b'ffd36d8d7cb10d813f81f9662a835f6beea72677'], [b'172.17.0.2', 7003, b'5c15b69186017ddc25ebfac81e74694fc0c1a160']], [5461, 10922, [b'172.17.0.2', 7001, b'069cda388c7c41c62abe892d9e0a2d55fbf5ffd5'], [b'172.17.0.2', 7004, b'dc152a08b4cf1f2a0baf775fb86ad0938cb907dc']], [10923, 16383, [b'172.17.0.2', 7002, b'3588b4cf9fc72d57bb262a024747797ead0cf7ea'], [b'172.17.0.2', 7005, b'a72c02c7d85f4ec3145ab2c411eefc0812aa96b0']] ] extended_mock_parsed = { (0, 5460): {'master': ('172.17.0.2', 7000), 'slaves': [('172.17.0.2', 7003)]}, (5461, 10922): {'master': ('172.17.0.2', 7001), 'slaves': [('172.17.0.2', 7004)]}, (10923, 16383): 
{'master': ('172.17.0.2', 7002), 'slaves': [('172.17.0.2', 7005)]} } assert parse_cluster_slots(extended_mock_binary_response) == extended_mock_parsed def test_string_keys_to(): def mock_true(): return True assert string_keys_to_dict(["FOO", "BAR"], mock_true) == {"FOO": mock_true, "BAR": mock_true} def test_dict_merge(): a = {"a": 1} b = {"b": 2} c = {"c": 3} assert dict_merge(a, b, c) == {"a": 1, "b": 2, "c": 3} def test_dict_merge_value_error(): with pytest.raises(ValueError): dict_merge([]) def test_blocked_command(): with pytest.raises(RedisClusterException) as ex: blocked_command(None, "SET") assert unicode(ex.value) == "Command: SET is blocked in redis cluster mode" def test_merge_result(): assert merge_result("foobar", {"a": [1, 2, 3], "b": [4, 5, 6]}) == [1, 2, 3, 4, 5, 6] assert merge_result("foobar", {"a": [1, 2, 3], "b": [1, 2, 3]}) == [1, 2, 3] def test_merge_result_value_error(): with pytest.raises(ValueError): merge_result("foobar", []) def test_first_key(): assert first_key("foobar", {"foo": 1}) == 1 with pytest.raises(RedisClusterException) as ex: first_key("foobar", {"foo": 1, "bar": 2}) assert unicode(ex.value).startswith("More then 1 result from command: foobar") def test_first_key_value_error(): with pytest.raises(ValueError): first_key("foobar", None) def test_clusterdown_wrapper(): @clusterdown_wrapper def bad_func(): raise ClusterDownError("CLUSTERDOWN") with pytest.raises(ClusterDownError) as cex: bad_func() assert unicode(cex.value).startswith("CLUSTERDOWN error. Unable to rebuild the cluster") redis-py-cluster-2.0.0/tox.ini000066400000000000000000000025021352661744600162540ustar00rootroot00000000000000# Tox (http://tox.testrun.org/) is a tool for running tests in # multiple virtualenvs. This configuration file will run the # test suite on all supported python versions. To use it, "pip # install tox" and then run "tox" from this directory. [tox] envlist = py27, py34, py35, py36, py37, hi27, hi34, hi35, hi36, hi37, flake8-py34, flake8-py27 [testenv] deps = -r{toxinidir}/dev-requirements.txt commands = python {envbindir}/coverage run --source rediscluster -p -m py.test [testenv:hi27] basepython = python2.7 deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:hi34] basepython = python3.4 deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:hi35] basepython = python3.5 deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:hi36] basepython = python3.6 deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:hi37] basepython = python3.7 deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:flake8-py34] basepython= python3.4 deps = flake8==2.2.5 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 . [testenv:flake8-py27] basepython= python2.7 deps = flake8==2.2.5 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 .
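# Example usage (a minimal sketch, not part of the upstream config): with tox
# installed ("pip install tox"), a single environment from the envlist above
# can be selected with the -e flag instead of running everything, e.g.:
#
#   tox -e py36          # test suite under python 3.6 with the plain parser
#   tox -e hi36          # same, but with the pinned hiredis parser
#   tox -e flake8-py34   # only the flake8 style check
#
# Running "tox" with no arguments executes every environment in the envlist.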