[
  {
    "path": ".craft.yml",
    "content": "minVersion: \"0.18.0\"\ngithub:\n  owner: getsentry\n  repo: rb\nchangelog: CHANGES\nchangelogPolicy: auto\nstatusProvider:\n  name: github\nartifactProvider:\n  name: github\ntargets:\n  - name: pypi\n  - name: github\n  - name: sentry-pypi\n    internalPypiRepo: getsentry/pypi\n\nrequireNames:\n  - /^rb-.+-py2.py3-none-any.whl$/\n"
  },
  {
    "path": ".github/workflows/build.yml",
    "content": "name: build\n\non:\n  push:\n    branches:\n      - master\n      - release/**\n\njobs:\n  dist:\n    name: Wheels\n    runs-on: ubuntu-latest\n\n    steps:\n      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4\n      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5\n      - run: |\n          pip install wheel\n          python setup.py bdist_wheel\n      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4\n        with:\n          name: ${{ github.sha }}\n          path: dist/*\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: release\n\non:\n  workflow_dispatch:\n    inputs:\n      version:\n        description: Version to release\n        required: true\n      force:\n        description: Force a release even when there are release-blockers (optional)\n        required: false\n\njobs:\n  release:\n    runs-on: ubuntu-latest\n    name: \"Release a new version\"\n    steps:\n      - name: Get auth token\n        id: token\n        uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0\n        with:\n          app-id: ${{ vars.SENTRY_RELEASE_BOT_CLIENT_ID }}\n          private-key: ${{ secrets.SENTRY_RELEASE_BOT_PRIVATE_KEY }}\n\n      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4\n        with:\n          token: ${{ steps.token.outputs.token }}\n          fetch-depth: 0\n\n      - name: Prepare release\n        uses: getsentry/action-prepare-release@c8e1c2009ab08259029170132c384f03c1064c0e # v1\n        env:\n          GITHUB_TOKEN: ${{ steps.token.outputs.token }}\n        with:\n          version: ${{ github.event.inputs.version }}\n          force: ${{ github.event.inputs.force }}\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: test\n\non:\n  push:\n    branches:\n      - master\n      - release/**\n  pull_request:\n\njobs:\n  test:\n    name: Run tests\n    runs-on: ${{ matrix.os }}\n    strategy:\n      matrix:\n        os: [ubuntu-latest, macos-latest]\n        python: [\"3.8\", \"3.9\", \"3.10\", \"pypy-3.8\"]\n        REDIS_VERSION: [\"<3\", \"<4\", \"<5\"]\n    env:\n      REDIS_VERSION: ${{ matrix.REDIS_VERSION }}\n    steps:\n      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3\n      - name: Setup Python\n        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5\n        with:\n          python-version: ${{ matrix.python }}\n\n      - name: Install dependencies\n        run: |\n          python -m pip install --upgrade pip\n          pip install pytest\n          pip install --editable .\n      - name: Install Redis\n        run: |\n          if [ \"$RUNNER_OS\" == \"Linux\" ]; then\n            sudo apt update && sudo apt install redis-server --no-install-recommends -y\n          elif [ \"$RUNNER_OS\" == \"macOS\" ]; then\n            brew install --quiet redis\n          else\n            echo \"$RUNNER_OS not supported\"\n            exit 1\n          fi\n      - name: Run tests\n        run: |\n          make test\n  collector:\n    needs: [test]\n    if: always()\n    runs-on: ubuntu-latest\n    steps:\n      - name: Check for failures\n        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')\n        run: |\n          echo \"One of the dependent jobs have failed. You may need to re-run it.\" && exit 1\n"
  },
  {
    "path": ".gitignore",
    "content": "docs/_build\n*.pyc\n*.pyo\n.DS_Store\n.cache/\nbuild\ndist\n*.egg-info\n"
  },
  {
    "path": ".python-version",
    "content": "3.8\n"
  },
  {
    "path": "CHANGES",
    "content": "Rb Changelog\n============\n\n1.10.0\n------\n\n### Various fixes & improvements\n\n- add internal pypi deploy to `rb` (#54) by @asottile-sentry\n- set fetch-depth: 0 for release (#53) by @asottile-sentry\n- add compat for redis 5.x (#52) by @asottile-sentry\n- fix CI (#51) by @asottile-sentry\n\n1.9.0\n-----\n\n- Redis compatibility for 3.4.1\n\n1.8\n-----------\n\n- Python 3.6 compatibility\n- Redis compatibility for versions >=2.6,<3.4\n\n1.7\n-----------\n\n(released Jun 23rd 2017)\n\n- Ensure a connection is released to the pool after receiving a response, even\n  if the result is an error.\n\n1.6\n-----------\n\n(released Nov 23rd 2016)\n\n- Support `options` keyword arguments passed to `execute_command`.\n\n1.5\n-----------\n\n(released Nov 23rd 2016)\n\n- Detect dead connections on pool checkout.\n\n1.4\n-----------\n\n(released on Feb 8th 2015)\n\n- Fixed cluster for host defaults support.\n- Changed poller to handle close explicitly.  This should prevent\n  bad loops in when the socket closes while writing.\n- Added support for execute_commands.\n\n1.3.1\n-------------\n\n(released on Oct 13th 2015)\n\n- Fixed an illogical constructor for the local client.\n- Fixed a problem with clearing out pending batches.\n- Hosts are now validated to not have holes in the two shipped routers\n  which both depend on a gapless setup.\n- Connection errors now try to print out the original IO error's infos.\n\n1.3\n-----------\n\n(released on Oct 7th 2015)\n\n- Quickly fixed `target_key`'s behavior to make sense so that the\n  result on the promise is the value instead of a dictionary of a\n  single host.\n\n1.2\n-----------\n\n(released on Oct 7th 2015)\n\n- Added `target_key` to the fanout client to simplify targeting of hosts.\n\n1.1.2\n-------------\n\n(released on Sep 28th 2015)\n\n- Fixed command buffers for disabled max concurrency.\n- Fixed map manager timeouts.\n\n1.1.1\n-------------\n\n(released on Sep 15th 2015)\n\n- Made rb work with older 
versions of pyredis.\n\n1.1\n-----------\n\n(released on Sep 9th 2015)\n\n- Added internal support for async writes which improves performance\n  and parallelism with large command batches where the command is\n  larger than the kernel buffer size.\n\n1.0\n-----------\n\n(released on Sep 4th 2015)\n\n- Added support for automatic batching of GET and SET to MGET and MSET.\n- Added emulated `mget` and `mset` commands to promise based clients.\n- Fixed a bug with the HostInfo not comparing correctly.\n- Added support for epoll as an alternative to poll.\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, 
annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding 
those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright 2011-2012 DISQUS\nCopyright 2015 Functional Software Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": "setup-git:\n\t@echo \"--> Installing git hooks\"\n\t@pip install flake8\n\t@cd .git/hooks && ln -sf ../../hooks/* ./\n\ntest:\n\t@py.test -vv --tb=short\n"
  },
  {
    "path": "README.md",
    "content": "# rb [![test](https://github.com/getsentry/rb/actions/workflows/test.yml/badge.svg)](https://github.com/getsentry/rb/actions/workflows/test.yml)\n\n![logo](https://github.com/getsentry/rb/blob/master/docs/_static/rb.png?raw=true)\n\nrb - the redis blaster.\n\nThe fastest way to talk to many redis nodes.  Can do routing as well as\nblindly blasting commands to many nodes.  How does it work?\n\nFor full documentation see [rb.rtfd.org](http://rb.rtfd.org/)\n\n## Quickstart\n\nSet up a cluster:\n\n```python\nfrom rb import Cluster\n\ncluster = Cluster({\n    0: {'port': 6379},\n    1: {'port': 6380},\n    2: {'port': 6381},\n    3: {'port': 6382},\n}, host_defaults={\n    'host': '127.0.0.1',\n})\n```\n\nAutomatic routing:\n\n```python\nresults = []\nwith cluster.map() as client:\n    for key in range(100):\n        client.get(key).then(lambda x: results.append(int(x or 0)))\n\nprint('Sum: %s' % sum(results))\n```\n\nFanout:\n\n```python\nwith cluster.fanout(hosts=[0, 1, 2, 3]) as client:\n    infos = client.info()\n```\n\nFanout to all:\n\n```python\nwith cluster.fanout(hosts='all') as client:\n    client.flushdb()\n```\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nPAPER         =\nBUILDDIR      = _build\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n\n.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest\n\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  html       to make standalone HTML files\"\n\t@echo \"  dirhtml    to make HTML files named index.html in directories\"\n\t@echo \"  singlehtml to make a single large HTML file\"\n\t@echo \"  pickle     to make pickle files\"\n\t@echo \"  json       to make JSON files\"\n\t@echo \"  htmlhelp   to make HTML files and a HTML help project\"\n\t@echo \"  qthelp     to make HTML files and a qthelp project\"\n\t@echo \"  devhelp    to make HTML files and a Devhelp project\"\n\t@echo \"  epub       to make an epub\"\n\t@echo \"  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  latexpdf   to make LaTeX files and run them through pdflatex\"\n\t@echo \"  text       to make text files\"\n\t@echo \"  man        to make manual pages\"\n\t@echo \"  changes    to make an overview of all changed/added/deprecated items\"\n\t@echo \"  linkcheck  to check all external links for integrity\"\n\t@echo \"  doctest    to run all doctests embedded in the documentation (if enabled)\"\n\nclean:\n\t-rm -rf $(BUILDDIR)/*\n\nhtml:\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\ndirhtml:\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. 
The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\nsinglehtml:\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. The HTML page is in $(BUILDDIR)/singlehtml.\"\n\npickle:\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\njson:\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\nhtmlhelp:\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\nqthelp:\n\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp\n\t@echo\n\t@echo \"Build finished; now you can run \"qcollectiongenerator\" with the\" \\\n\t      \".qhcp project file in $(BUILDDIR)/qthelp, like this:\"\n\t@echo \"# qcollectiongenerator $(BUILDDIR)/qthelp/Classy.qhcp\"\n\t@echo \"To view the help file:\"\n\t@echo \"# assistant -collectionFile $(BUILDDIR)/qthelp/Classy.qhc\"\n\ndevhelp:\n\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp\n\t@echo\n\t@echo \"Build finished.\"\n\t@echo \"To view the help file:\"\n\t@echo \"# mkdir -p $$HOME/.local/share/devhelp/Classy\"\n\t@echo \"# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Classy\"\n\t@echo \"# devhelp\"\n\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. 
The epub file is in $(BUILDDIR)/epub.\"\n\nlatex:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make all-pdf' or \\`make all-ps' in that directory to\" \\\n\t      \"run these through (pdf)latex.\"\n\nlatexpdf: latex\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\tmake -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\ntext:\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. The text files are in $(BUILDDIR)/text.\"\n\nman:\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\nchanges:\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\nlinkcheck:\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\ndoctest:\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/doctest/output.txt.\"\n"
  },
  {
    "path": "docs/_themes/rb_theme/layout.html",
    "content": "{% extends \"basic/layout.html\" %}\n{% block header %}\n  {{ super() }}\n  {% if pagename == 'index' %}\n  <div class=indexwrapper>\n  {% endif %}\n{% endblock %}\n{% block footer %}\n  {% if pagename == 'index' %}\n  </div>\n  {% endif %}\n{% endblock %}\n{# do not display relbars #}\n{% block relbar1 %}{% endblock %}\n{% block relbar2 %}\n  {% if theme_github_fork %}\n    <a href=\"http://github.com/{{ theme_github_fork }}\"><img style=\"position: fixed; top: 0; right: 0; border: 0;\"\n    src=\"http://s3.amazonaws.com/github/ribbons/forkme_right_darkblue_121621.png\" alt=\"Fork me on GitHub\" /></a>\n  {% endif %}\n{% endblock %}\n{% block sidebar1 %}{% endblock %}\n{% block sidebar2 %}{% endblock %}\n"
  },
  {
    "path": "docs/_themes/rb_theme/static/rb.css_t",
    "content": "@import url(\"basic.css\");\n@import url(http://fonts.googleapis.com/css?family=Roboto+Mono:400,700italic,700,400italic);\n \n/* -- page layout ----------------------------------------------------------- */\n \nbody {\n    font-family: 'Verdana', sans-serif;\n    font-weight: 300;\n    font-size: 17px;\n    color: #000;\n    background: white;\n    margin: 0;\n    padding: 0;\n}\n\ndiv.documentwrapper {\n    float: left;\n    width: 100%;\n}\n\ndiv.bodywrapper {\n    margin: 40px auto 0 auto;\n    max-width: 800px;\n}\n\nhr {\n    border: 1px solid #B1B4B6;\n}\n \ndiv.body {\n    background-color: #ffffff;\n    color: #3E4349;\n    padding: 0 30px 30px 30px;\n}\n\nimg.floatingflask {\n    padding: 0 0 10px 10px;\n    float: right;\n}\n \ndiv.footer {\n    text-align: right;\n    color: #888;\n    padding: 10px;\n    font-size: 14px;\n    width: 650px;\n    margin: 0 auto 40px auto;\n}\n \ndiv.footer a {\n    color: #888;\n    text-decoration: underline;\n}\n \ndiv.related {\n    line-height: 32px;\n    color: #888;\n}\n\ndiv.related ul {\n    padding: 0 0 0 10px;\n}\n \ndiv.related a {\n    color: #444;\n}\n \n/* -- body styles ----------------------------------------------------------- */\n \na {\n    color: white;\n    background: black;\n    font-weight: bold;\n    text-decoration: none;\n}\n \na:hover {\n    color: #888;\n    background: transparent;\n    text-decoration: underline;\n}\n\ndiv.body {\n    padding-bottom: 40px; /* saved for footer */\n}\n \ndiv.body h1,\ndiv.body h2,\ndiv.body h3,\ndiv.body h4,\ndiv.body h5,\ndiv.body h6 {\n    font-family: 'Verdana', sans-serif;\n    font-weight: bold;\n    margin: 30px 0px 10px 0px;\n    padding: 0;\n    color: black;\n}\n\ndiv.body h1:before {\n    content: \"\";\n    display: block;\n    background: url(rb.png) no-repeat center center;\n    background-size: 100%;\n    width: 256px;\n    height: 246px;\n    float: right;\n    margin: 0 0 25px 25px;\n}\n \ndiv.body h2 { font-size: 180%; 
}\ndiv.body h3 { font-size: 150%; }\ndiv.body h4 { font-size: 130%; }\ndiv.body h5 { font-size: 100%; }\ndiv.body h6 { font-size: 100%; }\n \na.headerlink {\n    color: white;\n    padding: 0 4px;\n    text-decoration: none;\n}\n \na.headerlink:hover {\n    color: #444;\n    background: #eaeaea;\n}\n \ndiv.body p, div.body dd, div.body li {\n    line-height: 1.4em;\n}\n\ndiv.admonition {\n    background: #fafafa;\n    margin: 20px -30px;\n    padding: 10px 30px;\n    border-top: 1px solid #ccc;\n    border-bottom: 1px solid #ccc;\n}\n\ndiv.admonition p.admonition-title {\n    font-family: 'Garamond', 'Georgia', serif;\n    font-weight: normal;\n    font-size: 24px;\n    margin: 0 0 10px 0;\n    padding: 0;\n    line-height: 1;\n}\n\ndiv.admonition p.last {\n    margin-bottom: 0;\n}\n\ndiv.highlight{\n    background-color: white;\n}\n\ndt:target, .highlight {\n    background: #FAF3E8;\n}\n\ndiv.note {\n    background-color: #eee;\n    border: 1px solid #ccc;\n}\n \ndiv.seealso {\n    background-color: #ffc;\n    border: 1px solid #ff6;\n}\n \ndiv.topic {\n    background-color: #eee;\n}\n \ndiv.warning {\n    background-color: #ffe4e4;\n    border: 1px solid #f66;\n}\n \np.admonition-title {\n    display: inline;\n}\n \np.admonition-title:after {\n    content: \":\";\n}\n\npre, code {\n    font-family: 'Roboto Mono', monospace;\n    font-size: 1em;\n}\n\nimg.screenshot {\n}\n\ncode.descname, code.descclassname {\n    font-size: 0.95em;\n}\n\ncode.descname {\n    padding-right: 0.08em;\n}\n\nimg.screenshot {\n    -moz-box-shadow: 2px 2px 4px #eee;\n    -webkit-box-shadow: 2px 2px 4px #eee;\n    box-shadow: 2px 2px 4px #eee;\n}\n\ntable.docutils {\n    border: 1px solid #888;\n    -moz-box-shadow: 2px 2px 4px #eee;\n    -webkit-box-shadow: 2px 2px 4px #eee;\n    box-shadow: 2px 2px 4px #eee;\n}\n\ntable.docutils td, table.docutils th {\n    border: 1px solid #888;\n    padding: 0.25em 0.7em;\n}\n\ntable.field-list, table.footnote {\n    border: none;\n    
-moz-box-shadow: none;\n    -webkit-box-shadow: none;\n    box-shadow: none;\n}\n\ntable.footnote {\n    margin: 15px 0;\n    width: 100%;\n    border: 1px solid #eee;\n}\n\ntable.field-list th {\n    padding: 0 0.8em 0 0;\n}\n\ntable.field-list td {\n    padding: 0;\n}\n\ntable.footnote td {\n    padding: 0.5em;\n}\n\ndl {\n    margin: 0;\n    padding: 0;\n}\n\ndl dd {\n    margin-left: 30px;\n}\n \npre {\n    margin: 15px 0;\n    line-height: 1.4em;\n    padding: 10px 20px;\n    background: #eee;\n}\n\na.reference.internal {\n    background: transparent;\n    color: black;\n}\n\ncode, a code, code.xref {\n    background-color: #eee;\n    color: #222;\n    /* padding: 1px 2px; */\n}\n\na:hover code {\n    background: black;\n    color: white;\n}\n"
  },
  {
    "path": "docs/_themes/rb_theme/theme.conf",
    "content": "[theme]\ninherit = basic\nstylesheet = rb.css\nnosidebar = true\n\n[options]\nindex_logo = ''\ngithub_fork =\n"
  },
  {
    "path": "docs/conf.py",
    "content": "# -*- coding: utf-8 -*-\n#\n# rb documentation build configuration file, created by\n# sphinx-quickstart on Mon Apr 26 19:53:01 2010.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.append(os.path.abspath('_themes'))\nsys.path.append(os.path.abspath('..'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'rb'\ncopyright = u'2015, Functional Software Inc.'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.0'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.0'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'rb_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n    'index_logo':       'rb.png',\n    'github_fork':      'getsentry/rb'\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_themes']\n\n# The name for this set of Sphinx documents.  If None, it defaults to\n# \"<project> v<release> documentation\".\nhtml_title = 'rb'\n\n# A shorter title for the navigation bar.  
Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  
The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'rbdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n  ('index', 'rb.tex', u'rb documentation',\n   u'Function Software Inc.', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\npygments_style = 'tango'\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    ('index', 'rb', u'rb documentation',\n     [u'Function Software Inc.'], 1)\n]\n\nintersphinx_mapping = {\n}\n"
  },
  {
    "path": "docs/index.rst",
    "content": "rb: the redis blaster\n=====================\n\n.. module:: rb\n\nRb, the redis blaster, is a library that implements non-replicated\nsharding for redis.  It implements a custom routing system on top of\npython redis that allows you to automatically target different servers\nwithout having to manually route requests to the individual nodes.\n\nIt does not implement all functionality of redis and does not attempt to\ndo so.  You can at any point get a client to a specific host, but for the\nmost part the assumption is that your operations are limited to basic\nkey/value operations that can be routed to different nodes automatically.\n\nWhat you can do:\n\n*   automatically target hosts for single-key operations\n*   execute commands against all or a subset of nodes\n*   do all of that in parallel\n\nInstallation\n------------\n\nrb is available on PyPI and can be installed from there::\n\n    $ pip install rb\n\nConfiguration\n-------------\n\nGetting started with rb is super easy.  If you have been using py-redis\nbefore you will feel right at home.  The main difference is that instead\nof connecting to a single host, you configure a cluster to connect to\nmultiple::\n\n    from rb import Cluster\n\n    cluster = Cluster(hosts={\n        0: {'port': 6379},\n        1: {'port': 6380},\n        2: {'port': 6381},\n        3: {'port': 6382},\n        4: {'port': 6379},\n        5: {'port': 6380},\n        6: {'port': 6381},\n        7: {'port': 6382},\n    }, host_defaults={\n        'host': '127.0.0.1',\n    })\n\nIn this case we set up 8 nodes on four different server processes on the\nsame host.  The `hosts` parameter is a mapping of hosts to connect to.\nThe key of the dictionary is the host ID (an integer) and the value is\na dictionary of parameters.  The `host_defaults` is a dictionary of\noptional defaults that is filled in for all hosts.  
This is useful if you\nwant to share some common defaults that repeat (in this case all hosts\nconnect to localhost).\n\nIn the default configuration the :class:`PartitionRouter` is used for\nrouting.\n\nRouting\n-------\n\nNow that the cluster is constructed we can use\n:meth:`Cluster.get_routing_client` to get a redis client that\nautomatically routes to the right redis nodes for each command::\n\n    client = cluster.get_routing_client()\n    results = {}\n    for key in keys_to_look_up:\n        results[key] = client.get(key)\n\nThe client works pretty much exactly like a standard pyredis\n`StrictRedis` client with the main difference that it can only execute commands\nthat involve exactly one key.\n\nThis basic operation however runs in series.  What makes rb useful is that\nit can automatically build redis pipelines and send out queries to many\nhosts in parallel.  This however changes the usage slightly as now the\nvalue is not immediately available::\n\n    results = {}\n    with cluster.map() as client:\n        for key in keys_to_look_up:\n            results[key] = client.get(key)\n\nWhile it looks similar so far, instead of storing the actual values in the\nresult dictionary, :class:`Promise` objects are stored instead.  When the\nmap context manager ends they are guaranteed however to have been executed\nand you can access the :attr:`Promise.value` attribute to get the value::\n\n    for key, promise in results.iteritems():\n        print '%s: %s' % (key, promise.value)\n\nIf you want to send a command to all participating hosts (for instance to\ndelete the database) you can use the :meth:`Cluster.all` method::\n\n    with cluster.all() as client:\n        client.flushdb()\n\nIf you do that, the promise value is a dictionary with the host IDs as\nkeys and the results as value.  
As an example::\n\n    with cluster.all() as client:\n        results = client.info()\n    for host_id, info in results.iteritems():\n        print 'host %s is running %s' % (host_id, info['os'])\n\nTo explicitly target some hosts you can use :meth:`Cluster.fanout` which\naccepts a list of host IDs to send the command to.\n\nAPI\n---\n\nThis is the entire reference of the public API.  Note that this library\nextends the Python redis library so some of these classes have more\nfunctionality for which you will need to consult the py-redis library.\n\nCluster\n```````\n\n.. autoclass:: Cluster\n   :members:\n\nClients\n```````\n\n.. autoclass:: RoutingClient\n   :members:\n\n.. autoclass:: MappingClient\n   :members:\n\n.. autoclass:: FanoutClient\n   :members:\n\nPromise\n```````\n\n.. autoclass:: Promise\n   :members:\n\nRouters\n```````\n\n.. autoclass:: BaseRouter\n   :members:\n\n.. autoclass:: ConsistentHashingRouter\n   :members:\n\n.. autoclass:: PartitionRouter\n   :members:\n\n.. autoexception:: UnroutableCommand\n\nTesting\n```````\n\n.. autoclass:: rb.testing.TestSetup\n\n.. autofunction:: rb.testing.make_test_cluster\n"
  },
  {
    "path": "docs/make.bat",
    "content": "@ECHO OFF\n\nREM Command file for Sphinx documentation\n\nif \"%SPHINXBUILD%\" == \"\" (\n\tset SPHINXBUILD=sphinx-build\n)\nset BUILDDIR=_build\nset ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .\nif NOT \"%PAPER%\" == \"\" (\n\tset ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%\n)\n\nif \"%1\" == \"\" goto help\n\nif \"%1\" == \"help\" (\n\t:help\n\techo.Please use `make ^<target^>` where ^<target^> is one of\n\techo.  html       to make standalone HTML files\n\techo.  dirhtml    to make HTML files named index.html in directories\n\techo.  singlehtml to make a single large HTML file\n\techo.  pickle     to make pickle files\n\techo.  json       to make JSON files\n\techo.  htmlhelp   to make HTML files and a HTML help project\n\techo.  qthelp     to make HTML files and a qthelp project\n\techo.  devhelp    to make HTML files and a Devhelp project\n\techo.  epub       to make an epub\n\techo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\n\techo.  text       to make text files\n\techo.  man        to make manual pages\n\techo.  changes    to make an overview over all changed/added/deprecated items\n\techo.  linkcheck  to check all external links for integrity\n\techo.  doctest    to run all doctests embedded in the documentation if enabled\n\tgoto end\n)\n\nif \"%1\" == \"clean\" (\n\tfor /d %%i in (%BUILDDIR%\\*) do rmdir /q /s %%i\n\tdel /q /s %BUILDDIR%\\*\n\tgoto end\n)\n\nif \"%1\" == \"html\" (\n\t%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html\n\techo.\n\techo.Build finished. The HTML pages are in %BUILDDIR%/html.\n\tgoto end\n)\n\nif \"%1\" == \"dirhtml\" (\n\t%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml\n\techo.\n\techo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.\n\tgoto end\n)\n\nif \"%1\" == \"singlehtml\" (\n\t%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml\n\techo.\n\techo.Build finished. 
The HTML pages are in %BUILDDIR%/singlehtml.\n\tgoto end\n)\n\nif \"%1\" == \"pickle\" (\n\t%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle\n\techo.\n\techo.Build finished; now you can process the pickle files.\n\tgoto end\n)\n\nif \"%1\" == \"json\" (\n\t%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json\n\techo.\n\techo.Build finished; now you can process the JSON files.\n\tgoto end\n)\n\nif \"%1\" == \"htmlhelp\" (\n\t%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp\n\techo.\n\techo.Build finished; now you can run HTML Help Workshop with the ^\n.hhp project file in %BUILDDIR%/htmlhelp.\n\tgoto end\n)\n\nif \"%1\" == \"qthelp\" (\n\t%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp\n\techo.\n\techo.Build finished; now you can run \"qcollectiongenerator\" with the ^\n.qhcp project file in %BUILDDIR%/qthelp, like this:\n\techo.^> qcollectiongenerator %BUILDDIR%\\qthelp\\Classy.qhcp\n\techo.To view the help file:\n\techo.^> assistant -collectionFile %BUILDDIR%\\qthelp\\Classy.ghc\n\tgoto end\n)\n\nif \"%1\" == \"devhelp\" (\n\t%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp\n\techo.\n\techo.Build finished.\n\tgoto end\n)\n\nif \"%1\" == \"epub\" (\n\t%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub\n\techo.\n\techo.Build finished. The epub file is in %BUILDDIR%/epub.\n\tgoto end\n)\n\nif \"%1\" == \"latex\" (\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\n\techo.\n\techo.Build finished; the LaTeX files are in %BUILDDIR%/latex.\n\tgoto end\n)\n\nif \"%1\" == \"text\" (\n\t%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text\n\techo.\n\techo.Build finished. The text files are in %BUILDDIR%/text.\n\tgoto end\n)\n\nif \"%1\" == \"man\" (\n\t%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man\n\techo.\n\techo.Build finished. 
The manual pages are in %BUILDDIR%/man.\n\tgoto end\n)\n\nif \"%1\" == \"changes\" (\n\t%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes\n\techo.\n\techo.The overview file is in %BUILDDIR%/changes.\n\tgoto end\n)\n\nif \"%1\" == \"linkcheck\" (\n\t%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck\n\techo.\n\techo.Link check complete; look for any errors in the above output ^\nor in %BUILDDIR%/linkcheck/output.txt.\n\tgoto end\n)\n\nif \"%1\" == \"doctest\" (\n\t%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest\n\techo.\n\techo.Testing of doctests in the sources finished, look at the ^\nresults in %BUILDDIR%/doctest/output.txt.\n\tgoto end\n)\n\n:end\n"
  },
  {
    "path": "hooks/pre-commit",
    "content": "#!/usr/bin/env python\n\nimport glob\nimport os\nimport sys\n\nos.environ['PYFLAKES_NODOCTEST'] = '1'\n\n# pep8.py uses sys.argv to find setup.cfg\nsys.argv = [os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)]\n\n# git usurps your bin path for hooks and will always run system python\nif 'VIRTUAL_ENV' in os.environ:\n    site_packages = glob.glob(\n        '%s/lib/*/site-packages' % os.environ['VIRTUAL_ENV'])[0]\n    sys.path.insert(0, site_packages)\n\n\ndef py_lint(files_modified):\n    from flake8.main import DEFAULT_CONFIG\n    from flake8.engine import get_style_guide\n\n    # remove non-py files and files which no longer exist\n    files_modified = filter(lambda x: x.endswith('.py'), files_modified)\n\n    if not files_modified:\n        return False\n\n    flake8_style = get_style_guide(config_file=DEFAULT_CONFIG)\n    report = flake8_style.check_files(files_modified)\n\n    return report.total_errors != 0\n\n\ndef main():\n    from flake8.hooks import run\n\n    gitcmd = \"git diff-index --cached --name-only HEAD\"\n\n    _, files_modified, _ = run(gitcmd)\n\n    files_modified = filter(lambda x: os.path.exists(x), files_modified)\n\n    if py_lint(files_modified):\n        return 1\n    return 0\n\nif __name__ == '__main__':\n    sys.exit(main())\n"
  },
  {
    "path": "rb/__init__.py",
    "content": "\"\"\"\n    rb\n    ~~\n\n    The redis blaster.\n\n    :copyright: (c) 2015 Functional Software Inc.\n    :license: Apache License 2.0, see LICENSE for more details.\n\"\"\"\nfrom rb.cluster import Cluster\nfrom rb.clients import RoutingClient, MappingClient, FanoutClient\nfrom rb.router import (\n    BaseRouter,\n    ConsistentHashingRouter,\n    PartitionRouter,\n    UnroutableCommand,\n)\nfrom rb.promise import Promise\n\n\n__version__ = \"1.10.0\"\n\n__all__ = [\n    # cluster\n    \"Cluster\",\n    # client\n    \"RoutingClient\",\n    \"MappingClient\",\n    \"FanoutClient\",\n    # router\n    \"BaseRouter\",\n    \"ConsistentHashingRouter\",\n    \"PartitionRouter\",\n    \"UnroutableCommand\",\n    # promise\n    \"Promise\",\n]\n"
  },
  {
    "path": "rb/_rediscommands.py",
    "content": "# flake8: noqa\n\nCOMMANDS = {\n    \"APPEND\": {\"arity\": 3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"AUTH\": {\n        \"arity\": 2,\n        \"flags\": [\"readonly\", \"noscript\", \"loading\", \"stale\", \"fast\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"BGREWRITEAOF\": {\"arity\": 1, \"flags\": [\"readonly\", \"admin\"], \"key_spec\": (0, 0, 0)},\n    \"BGSAVE\": {\"arity\": 1, \"flags\": [\"readonly\", \"admin\"], \"key_spec\": (0, 0, 0)},\n    \"BITCOUNT\": {\"arity\": -2, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"BITOP\": {\"arity\": -4, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (2, -1, 1)},\n    \"BITPOS\": {\"arity\": -3, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"BLPOP\": {\"arity\": -3, \"flags\": [\"write\", \"noscript\"], \"key_spec\": (1, -2, 1)},\n    \"BRPOP\": {\"arity\": -3, \"flags\": [\"write\", \"noscript\"], \"key_spec\": (1, 1, 1)},\n    \"BRPOPLPUSH\": {\n        \"arity\": 4,\n        \"flags\": [\"write\", \"denyoom\", \"noscript\"],\n        \"key_spec\": (1, 2, 1),\n    },\n    \"CLIENT\": {\"arity\": -2, \"flags\": [\"readonly\", \"admin\"], \"key_spec\": (0, 0, 0)},\n    \"COMMAND\": {\n        \"arity\": 0,\n        \"flags\": [\"readonly\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"CONFIG\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"admin\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"DBSIZE\": {\"arity\": 1, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (0, 0, 0)},\n    \"DEBUG\": {\"arity\": -2, \"flags\": [\"admin\", \"noscript\"], \"key_spec\": (0, 0, 0)},\n    \"DECR\": {\"arity\": 2, \"flags\": [\"write\", \"denyoom\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"DECRBY\": {\n        \"arity\": 3,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"DEL\": {\"arity\": -2, \"flags\": [\"write\"], 
\"key_spec\": (1, -1, 1)},\n    \"DISCARD\": {\n        \"arity\": 1,\n        \"flags\": [\"readonly\", \"noscript\", \"fast\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"DUMP\": {\"arity\": 2, \"flags\": [\"readonly\", \"admin\"], \"key_spec\": (1, 1, 1)},\n    \"ECHO\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (0, 0, 0)},\n    \"EVAL\": {\"arity\": -3, \"flags\": [\"noscript\", \"movablekeys\"], \"key_spec\": (0, 0, 0)},\n    \"EVALSHA\": {\n        \"arity\": -3,\n        \"flags\": [\"noscript\", \"movablekeys\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"EXEC\": {\"arity\": 1, \"flags\": [\"noscript\", \"skip_monitor\"], \"key_spec\": (0, 0, 0)},\n    \"EXISTS\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"EXPIRE\": {\"arity\": 3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"EXPIREAT\": {\"arity\": 3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"FLUSHALL\": {\"arity\": 1, \"flags\": [\"write\"], \"key_spec\": (0, 0, 0)},\n    \"FLUSHDB\": {\"arity\": 1, \"flags\": [\"write\"], \"key_spec\": (0, 0, 0)},\n    \"GET\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"GETBIT\": {\"arity\": 3, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"GETRANGE\": {\"arity\": 4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"GETSET\": {\"arity\": 3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"HDEL\": {\"arity\": -3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"HEXISTS\": {\"arity\": 3, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"HGET\": {\"arity\": 3, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"HGETALL\": {\"arity\": 2, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"HINCRBY\": {\n        \"arity\": 4,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        
\"key_spec\": (1, 1, 1),\n    },\n    \"HINCRBYFLOAT\": {\n        \"arity\": 4,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"HKEYS\": {\n        \"arity\": 2,\n        \"flags\": [\"readonly\", \"sort_for_script\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"HLEN\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"HMGET\": {\"arity\": -3, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"HMSET\": {\"arity\": -4, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"HSCAN\": {\"arity\": -3, \"flags\": [\"readonly\", \"random\"], \"key_spec\": (1, 1, 1)},\n    \"HSET\": {\"arity\": 4, \"flags\": [\"write\", \"denyoom\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"HSETNX\": {\n        \"arity\": 4,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"HVALS\": {\n        \"arity\": 2,\n        \"flags\": [\"readonly\", \"sort_for_script\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"INCR\": {\"arity\": 2, \"flags\": [\"write\", \"denyoom\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"INCRBY\": {\n        \"arity\": 3,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"INCRBYFLOAT\": {\n        \"arity\": 3,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"INFO\": {\n        \"arity\": -1,\n        \"flags\": [\"readonly\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"KEYS\": {\n        \"arity\": 2,\n        \"flags\": [\"readonly\", \"sort_for_script\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"LASTSAVE\": {\n        \"arity\": 1,\n        \"flags\": [\"readonly\", \"random\", \"fast\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"LATENCY\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"admin\", \"noscript\", 
\"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"LINDEX\": {\"arity\": 3, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"LINSERT\": {\"arity\": 5, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"LLEN\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"LPOP\": {\"arity\": 2, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"LPUSH\": {\n        \"arity\": -3,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"LPUSHX\": {\n        \"arity\": 3,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"LRANGE\": {\"arity\": 4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"LREM\": {\"arity\": 4, \"flags\": [\"write\"], \"key_spec\": (1, 1, 1)},\n    \"LSET\": {\"arity\": 4, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"LTRIM\": {\"arity\": 4, \"flags\": [\"write\"], \"key_spec\": (1, 1, 1)},\n    \"MGET\": {\"arity\": -2, \"flags\": [\"readonly\"], \"key_spec\": (1, -1, 1)},\n    \"MIGRATE\": {\"arity\": 6, \"flags\": [\"write\", \"admin\"], \"key_spec\": (0, 0, 0)},\n    \"MONITOR\": {\n        \"arity\": 1,\n        \"flags\": [\"readonly\", \"admin\", \"noscript\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"MOVE\": {\"arity\": 3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"MSET\": {\"arity\": -3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, -1, 2)},\n    \"MSETNX\": {\"arity\": -3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, -1, 2)},\n    \"MULTI\": {\n        \"arity\": 1,\n        \"flags\": [\"readonly\", \"noscript\", \"fast\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"OBJECT\": {\"arity\": 3, \"flags\": [\"readonly\"], \"key_spec\": (2, 2, 2)},\n    \"PERSIST\": {\"arity\": 2, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"PEXPIRE\": 
{\"arity\": 3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"PEXPIREAT\": {\"arity\": 3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"PFADD\": {\n        \"arity\": -2,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"PFCOUNT\": {\"arity\": -2, \"flags\": [\"write\"], \"key_spec\": (1, 1, 1)},\n    \"PFDEBUG\": {\"arity\": -3, \"flags\": [\"write\"], \"key_spec\": (0, 0, 0)},\n    \"PFMERGE\": {\"arity\": -2, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, -1, 1)},\n    \"PFSELFTEST\": {\"arity\": 1, \"flags\": [\"readonly\"], \"key_spec\": (0, 0, 0)},\n    \"PING\": {\"arity\": 1, \"flags\": [\"readonly\", \"stale\", \"fast\"], \"key_spec\": (0, 0, 0)},\n    \"PSETEX\": {\"arity\": 4, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"PSUBSCRIBE\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"pubsub\", \"noscript\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"PSYNC\": {\n        \"arity\": 3,\n        \"flags\": [\"readonly\", \"admin\", \"noscript\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"PTTL\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"PUBLISH\": {\n        \"arity\": 3,\n        \"flags\": [\"readonly\", \"pubsub\", \"loading\", \"stale\", \"fast\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"PUBSUB\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"pubsub\", \"random\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"PUNSUBSCRIBE\": {\n        \"arity\": -1,\n        \"flags\": [\"readonly\", \"pubsub\", \"noscript\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"RANDOMKEY\": {\"arity\": 1, \"flags\": [\"readonly\", \"random\"], \"key_spec\": (0, 0, 0)},\n    \"RENAME\": {\"arity\": 3, \"flags\": [\"write\"], \"key_spec\": (1, 2, 1)},\n    \"RENAMENX\": 
{\"arity\": 3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 2, 1)},\n    \"REPLCONF\": {\n        \"arity\": -1,\n        \"flags\": [\"readonly\", \"admin\", \"noscript\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"RESTORE\": {\n        \"arity\": 4,\n        \"flags\": [\"write\", \"denyoom\", \"admin\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"ROLE\": {\n        \"arity\": 1,\n        \"flags\": [\"admin\", \"noscript\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"RPOP\": {\"arity\": 2, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"RPOPLPUSH\": {\"arity\": 3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 2, 1)},\n    \"RPUSH\": {\n        \"arity\": -3,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"RPUSHX\": {\n        \"arity\": 3,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"SADD\": {\"arity\": -3, \"flags\": [\"write\", \"denyoom\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"SAVE\": {\n        \"arity\": 1,\n        \"flags\": [\"readonly\", \"admin\", \"noscript\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"SCAN\": {\"arity\": -2, \"flags\": [\"readonly\", \"random\"], \"key_spec\": (0, 0, 0)},\n    \"SCARD\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"SCRIPT\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"admin\", \"noscript\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"SDIFF\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"sort_for_script\"],\n        \"key_spec\": (1, -1, 1),\n    },\n    \"SDIFFSTORE\": {\"arity\": -3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, -1, 1)},\n    \"SELECT\": {\n        \"arity\": 2,\n        \"flags\": [\"readonly\", \"loading\", \"fast\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"SET\": 
{\"arity\": -3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"SETBIT\": {\"arity\": 4, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"SETEX\": {\"arity\": 4, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"SETNX\": {\"arity\": 3, \"flags\": [\"write\", \"denyoom\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"SETRANGE\": {\"arity\": 4, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"SHUTDOWN\": {\n        \"arity\": -1,\n        \"flags\": [\"readonly\", \"admin\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"SINTER\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"sort_for_script\"],\n        \"key_spec\": (1, -1, 1),\n    },\n    \"SINTERSTORE\": {\"arity\": -3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, -1, 1)},\n    \"SISMEMBER\": {\"arity\": 3, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"SLAVEOF\": {\n        \"arity\": 3,\n        \"flags\": [\"admin\", \"noscript\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"SLOWLOG\": {\"arity\": -2, \"flags\": [\"readonly\"], \"key_spec\": (0, 0, 0)},\n    \"SMEMBERS\": {\n        \"arity\": 2,\n        \"flags\": [\"readonly\", \"sort_for_script\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"SMOVE\": {\"arity\": 4, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 2, 1)},\n    \"SORT\": {\"arity\": -2, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n    \"SPOP\": {\n        \"arity\": 2,\n        \"flags\": [\"write\", \"noscript\", \"random\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"SRANDMEMBER\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"random\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"SREM\": {\"arity\": -3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"SSCAN\": {\"arity\": -3, \"flags\": [\"readonly\", \"random\"], \"key_spec\": (1, 1, 
1)},\n    \"STRLEN\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"SUBSCRIBE\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"pubsub\", \"noscript\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"SUBSTR\": {\"arity\": 4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"SUNION\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"sort_for_script\"],\n        \"key_spec\": (1, -1, 1),\n    },\n    \"SUNIONSTORE\": {\"arity\": -3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, -1, 1)},\n    \"SYNC\": {\n        \"arity\": 1,\n        \"flags\": [\"readonly\", \"admin\", \"noscript\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"TIME\": {\n        \"arity\": 1,\n        \"flags\": [\"readonly\", \"random\", \"fast\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"TTL\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"TYPE\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"UNSUBSCRIBE\": {\n        \"arity\": -1,\n        \"flags\": [\"readonly\", \"pubsub\", \"noscript\", \"loading\", \"stale\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"UNWATCH\": {\n        \"arity\": 1,\n        \"flags\": [\"readonly\", \"noscript\", \"fast\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"WATCH\": {\n        \"arity\": -2,\n        \"flags\": [\"readonly\", \"noscript\", \"fast\"],\n        \"key_spec\": (1, -1, 1),\n    },\n    \"ZADD\": {\"arity\": -4, \"flags\": [\"write\", \"denyoom\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"ZCARD\": {\"arity\": 2, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"ZCOUNT\": {\"arity\": 4, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"ZINCRBY\": {\n        \"arity\": 4,\n        \"flags\": [\"write\", \"denyoom\", \"fast\"],\n        \"key_spec\": (1, 1, 1),\n    },\n    \"ZINTERSTORE\": {\n   
     \"arity\": -4,\n        \"flags\": [\"write\", \"denyoom\", \"movablekeys\"],\n        \"key_spec\": (0, 0, 0),\n    },\n    \"ZLEXCOUNT\": {\"arity\": 4, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"ZRANGE\": {\"arity\": -4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"ZRANGEBYLEX\": {\"arity\": -4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"ZRANGEBYSCORE\": {\"arity\": -4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"ZRANK\": {\"arity\": 3, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"ZREM\": {\"arity\": -3, \"flags\": [\"write\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"ZREMRANGEBYLEX\": {\"arity\": 4, \"flags\": [\"write\"], \"key_spec\": (1, 1, 1)},\n    \"ZREMRANGEBYRANK\": {\"arity\": 4, \"flags\": [\"write\"], \"key_spec\": (1, 1, 1)},\n    \"ZREMRANGEBYSCORE\": {\"arity\": 4, \"flags\": [\"write\"], \"key_spec\": (1, 1, 1)},\n    \"ZREVRANGE\": {\"arity\": -4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"ZREVRANGEBYLEX\": {\"arity\": -4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"ZREVRANGEBYSCORE\": {\"arity\": -4, \"flags\": [\"readonly\"], \"key_spec\": (1, 1, 1)},\n    \"ZREVRANK\": {\"arity\": 3, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"ZSCAN\": {\"arity\": -3, \"flags\": [\"readonly\", \"random\"], \"key_spec\": (1, 1, 1)},\n    \"ZSCORE\": {\"arity\": 3, \"flags\": [\"readonly\", \"fast\"], \"key_spec\": (1, 1, 1)},\n    \"ZUNIONSTORE\": {\n        \"arity\": -4,\n        \"flags\": [\"write\", \"denyoom\", \"movablekeys\"],\n        \"key_spec\": (0, 0, 0),\n    },\n}\n\n\nif __name__ == \"__main__\":\n    import redis\n    import pprint\n\n    rv = {}\n    for row in redis.Redis().execute_command(\"COMMAND\"):\n        cmd, arity, flags, first_key, last_key, step_count = row\n        rv[cmd.upper()] = {\n            \"arity\": arity,\n            \"flags\": flags,\n            
\"key_spec\": (int(first_key), int(last_key), int(step_count)),\n        }\n\n    tail = []\n    with open(__file__.rstrip(\"co\"), \"r+\") as f:\n        for line in f:\n            if line.strip() == \"if __name__ == '__main__':\":\n                tail.append(line)\n                tail.extend(f)\n                break\n\n        f.seek(0)\n        f.truncate(0)\n        f.write(\n            \"# flake8: noqa\\n\\nCOMMANDS = %s\\n\\n\\n%s\"\n            % (pprint.pformat(rv, width=74), \"\".join(tail))\n        )\n"
  },
  {
    "path": "rb/clients.py",
    "content": "import time\nimport errno\nimport socket\n\nfrom weakref import ref as weakref\n\nfrom redis import StrictRedis\nfrom redis.client import list_or_args\nfrom redis.exceptions import ConnectionError\n\ntry:\n    from redis.exceptions import TimeoutError\nexcept ImportError:\n    TimeoutError = ConnectionError\n\nfrom rb.promise import Promise\nfrom rb.poll import poll, is_closed\nfrom rb.utils import izip, iteritems\n\n\nAUTO_BATCH_COMMANDS = {\n    \"GET\": (\"MGET\", True),\n    \"SET\": (\"MSET\", False),\n}\n\n\ndef assert_open(client):\n    if client.closed:\n        raise ValueError(\"I/O operation on closed file\")\n\n\ndef merge_batch(command_name, arg_promise_tuples):\n    batch_command, list_response = AUTO_BATCH_COMMANDS[command_name]\n\n    if len(arg_promise_tuples) == 1:\n        args, promise = arg_promise_tuples[0]\n        return command_name, args, {}, promise\n\n    promise = Promise()\n\n    @promise.done\n    def on_success(value):\n        if list_response:\n            for item, (_, promise) in izip(value, arg_promise_tuples):\n                promise.resolve(item)\n        else:\n            for _, promise in arg_promise_tuples:\n                promise.resolve(value)\n\n    args = []\n    for individual_args, _ in arg_promise_tuples:\n        args.extend(individual_args)\n\n    return batch_command, args, {}, promise\n\n\ndef auto_batch_commands(commands):\n    \"\"\"Given a pipeline of commands this attempts to merge the commands\n    into more efficient ones if that is possible.\n    \"\"\"\n    pending_batch = None\n\n    for command_name, args, options, promise in commands:\n        # This command cannot be batched, return it as such.\n        if command_name not in AUTO_BATCH_COMMANDS:\n            if pending_batch:\n                yield merge_batch(*pending_batch)\n                pending_batch = None\n            yield command_name, args, options, promise\n            continue\n\n        assert not options, \"batch 
commands cannot merge options\"\n        if pending_batch and pending_batch[0] == command_name:\n            pending_batch[1].append((args, promise))\n        else:\n            if pending_batch:\n                yield merge_batch(*pending_batch)\n            pending_batch = (command_name, [(args, promise)])\n\n    if pending_batch:\n        yield merge_batch(*pending_batch)\n\n\nclass CommandBuffer(object):\n    \"\"\"The command buffer is an internal construct \"\"\"\n\n    def __init__(self, host_id, connect, auto_batch=True):\n        self.host_id = host_id\n        self.connection = None\n        self._connect_func = connect\n        self.connect()\n        self.commands = []\n        self.pending_responses = []\n        self.auto_batch = auto_batch\n        self.sent_something = False\n        self.reconnects = 0\n        self._send_buf = []\n\n    @property\n    def closed(self):\n        \"\"\"Indicates if the command buffer is closed.\"\"\"\n        return self.connection is None or self.connection._sock is None\n\n    def connect(self):\n        if self.connection is not None:\n            return\n        self.connection = self._connect_func()\n        # Ensure we're connected.  
Without this, we won't have a socket\n        # we can select over.\n        self.connection.connect()\n\n    def reconnect(self):\n        if self.sent_something:\n            raise RuntimeError(\n                \"Cannot reset command buffer that already \" \"sent out data.\"\n            )\n        if self.reconnects > 5:\n            return False\n        self.reconnects += 1\n        self.connection = None\n        self.connect()\n        return True\n\n    def fileno(self):\n        \"\"\"Returns the file number of the underlying connection's socket\n        to be able to select over it.\n        \"\"\"\n        assert_open(self)\n        return self.connection._sock.fileno()\n\n    def enqueue_command(self, command_name, args, options):\n        \"\"\"Enqueue a new command into this pipeline.\"\"\"\n        assert_open(self)\n        promise = Promise()\n        self.commands.append((command_name, args, options, promise))\n        return promise\n\n    @property\n    def has_pending_requests(self):\n        \"\"\"Indicates if there are outstanding pending requests on this\n        buffer.\n        \"\"\"\n        return bool(self._send_buf or self.commands)\n\n    def send_buffer(self):\n        \"\"\"Utility function that sends the buffer into the provided socket.\n        The buffer itself will slowly clear out and is modified in place.\n        \"\"\"\n        buf = self._send_buf\n        sock = self.connection._sock\n        try:\n            timeout = sock.gettimeout()\n            sock.setblocking(False)\n            try:\n                for idx, item in enumerate(buf):\n                    sent = 0\n                    while 1:\n                        try:\n                            sent = sock.send(item)\n                        except IOError as e:\n                            if e.errno == errno.EAGAIN:\n                                continue\n                            elif e.errno == errno.EWOULDBLOCK:\n                                
break\n                            raise\n                        self.sent_something = True\n                        break\n                    if sent < len(item):\n                        buf[: idx + 1] = [item[sent:]]\n                        break\n                else:\n                    del buf[:]\n            finally:\n                sock.settimeout(timeout)\n        except IOError as e:\n            self.connection.disconnect()\n            if isinstance(e, socket.timeout):\n                raise TimeoutError(\"Timeout writing to socket (host %s)\" % self.host_id)\n            raise ConnectionError(\n                \"Error while writing to socket (host %s): %s\" % (self.host_id, e)\n            )\n\n    def send_pending_requests(self):\n        \"\"\"Sends all pending requests into the connection.  The default is\n        to only send pending data that fits into the socket without blocking.\n        This returns `True` if all data was sent or `False` if pending data\n        is left over.\n        \"\"\"\n        assert_open(self)\n\n        unsent_commands = self.commands\n        if unsent_commands:\n            self.commands = []\n\n            if self.auto_batch:\n                unsent_commands = auto_batch_commands(unsent_commands)\n\n            buf = []\n            for command_name, args, options, promise in unsent_commands:\n                buf.append((command_name,) + tuple(args))\n                self.pending_responses.append((command_name, options, promise))\n\n            cmds = self.connection.pack_commands(buf)\n            self._send_buf.extend(cmds)\n\n        if not self._send_buf:\n            return True\n\n        self.send_buffer()\n        return not self._send_buf\n\n    def wait_for_responses(self, client):\n        \"\"\"Waits for all responses to come back and resolves the\n        eventual results.\n        \"\"\"\n        assert_open(self)\n\n        if self.has_pending_requests:\n            raise RuntimeError(\n          
      \"Cannot wait for responses if there are \"\n                \"pending requests outstanding.  You need \"\n                \"to wait for pending requests to be sent \"\n                \"first.\"\n            )\n\n        pending = self.pending_responses\n        self.pending_responses = []\n        for command_name, options, promise in pending:\n            value = client.parse_response(self.connection, command_name, **options)\n            promise.resolve(value)\n\n\nclass RoutingPool(object):\n    \"\"\"The routing pool works together with the routing client to\n    internally dispatch through the cluster's router to the correct\n    internal connection pool.\n    \"\"\"\n\n    def __init__(self, cluster):\n        self.cluster = cluster\n\n    def get_connection(self, command_name, shard_hint=None):\n        host_id = shard_hint\n        if host_id is None:\n            raise RuntimeError(\"The routing pool requires the host id \" \"as shard hint\")\n\n        real_pool = self.cluster.get_pool_for_host(host_id)\n\n        # When we check something out from the real underlying pool it's\n        # very much possible that the connection is stale.  This is why we\n        # check out up to 10 connections which are either not connected\n        # yet or verified alive.\n        for _ in range(10):\n            con = real_pool.get_connection(command_name)\n            if con._sock is None or not is_closed(con._sock):\n                con.__creating_pool = weakref(real_pool)\n                return con\n\n        raise ConnectionError(\n            \"Failed to check out a valid connection \" \"(host %s)\" % host_id\n        )\n\n    def release(self, connection):\n        # The real pool is referenced by the connection through an\n        # internal weakref.  
If the weakref is broken it means the\n        # pool is already gone and we do not need to release the\n        # connection.\n        try:\n            real_pool = connection.__creating_pool()\n        except (AttributeError, TypeError):\n            real_pool = None\n\n        if real_pool is not None:\n            real_pool.release(connection)\n\n    def disconnect(self):\n        self.cluster.disconnect_pools()\n\n    def reset(self):\n        pass\n\n\nclass BaseClient(StrictRedis):\n    pass\n\n\nclass RoutingBaseClient(BaseClient):\n    def __init__(self, connection_pool, auto_batch=True):\n        BaseClient.__init__(self, connection_pool=connection_pool)\n        self.auto_batch = auto_batch\n\n    def pubsub(self, **kwargs):\n        raise NotImplementedError(\"Pubsub is unsupported.\")\n\n    def pipeline(self, transaction=True, shard_hint=None):\n        raise NotImplementedError(\n            \"Manual pipelines are unsupported. rb \" \"automatically pipelines commands.\"\n        )\n\n    def lock(self, *args, **kwargs):\n        raise NotImplementedError(\"Locking is not supported.\")\n\n\nclass MappingClient(RoutingBaseClient):\n    \"\"\"The routing client uses the cluster's router to target an individual\n    node automatically based on the key of the redis command executed.\n\n    For the parameters see :meth:`Cluster.map`.\n    \"\"\"\n\n    def __init__(self, connection_pool, max_concurrency=None, auto_batch=True):\n        RoutingBaseClient.__init__(\n            self, connection_pool=connection_pool, auto_batch=auto_batch\n        )\n        # careful.  
If you introduce any other variables here, then make\n        # sure that FanoutClient.target still works correctly!\n        self._max_concurrency = max_concurrency\n        self._cb_poll = poll()\n\n    # For the mapping client we can fix up some redis standard commands\n    # as we are promise based and have some flexibility here.\n\n    def mget(self, keys, *args):\n        args = list_or_args(keys, args)\n        return Promise.all([self.get(arg) for arg in args])\n\n    def mset(self, *args, **kwargs):\n        return Promise.all(\n            [self.set(k, v) for k, v in iteritems(dict(*args, **kwargs))]\n        ).then(lambda x: None)\n\n    # Standard redis methods\n\n    def execute_command(self, *args, **options):\n        router = self.connection_pool.cluster.get_router()\n        host_id = router.get_host_for_command(args[0], args[1:])\n        buf = self._get_command_buffer(host_id, args[0])\n        return buf.enqueue_command(args[0], args[1:], options)\n\n    # Custom Internal API\n\n    def _get_command_buffer(self, host_id, command_name):\n        \"\"\"Returns the command buffer for the given command and arguments.\"\"\"\n        buf = self._cb_poll.get(host_id)\n        if buf is not None:\n            return buf\n\n        if self._max_concurrency is not None:\n            while len(self._cb_poll) >= self._max_concurrency:\n                self.join(timeout=1.0)\n\n        def connect():\n            return self.connection_pool.get_connection(command_name, shard_hint=host_id)\n\n        buf = CommandBuffer(host_id, connect, self.auto_batch)\n        self._cb_poll.register(host_id, buf)\n        return buf\n\n    def _release_command_buffer(self, command_buffer):\n        \"\"\"This is called by the command buffer when it closes.\"\"\"\n        if command_buffer.closed:\n            return\n\n        self._cb_poll.unregister(command_buffer.host_id)\n        self.connection_pool.release(command_buffer.connection)\n        command_buffer.connection 
= None\n\n    def _send_or_reconnect(self, command_buffer):\n        try:\n            command_buffer.send_pending_requests()\n        except ConnectionError as e:\n            self._try_reconnect(command_buffer, e)\n\n    def _try_reconnect(self, command_buffer, err=None):\n        # If something was sent before, we can't do anything at which\n        # point we just reraise the underlying error.\n        if command_buffer.sent_something:\n            raise err or ConnectionError(\n                \"Cannot reconnect when data was \" \"already sent.\"\n            )\n        self._release_command_buffer(command_buffer)\n        # If we cannot reconnect, reraise the error.\n        if not command_buffer.reconnect():\n            raise err or ConnectionError(\"Too many attempts to reconnect.\")\n        self._cb_poll.register(command_buffer.host_id, command_buffer)\n\n    # Custom Public API\n\n    def join(self, timeout=None):\n        \"\"\"Waits for all outstanding responses to come back or the timeout\n        to be hit.\n        \"\"\"\n        remaining = timeout\n\n        while self._cb_poll and (remaining is None or remaining > 0):\n            now = time.time()\n            rv = self._cb_poll.poll(remaining)\n            if remaining is not None:\n                remaining -= time.time() - now\n\n            for command_buffer, event in rv:\n                # This command buffer still has pending requests which\n                # means we have to send them out first before we can read\n                # all the data from it.\n                if command_buffer.has_pending_requests:\n                    if event == \"close\":\n                        self._try_reconnect(command_buffer)\n                    elif event == \"write\":\n                        self._send_or_reconnect(command_buffer)\n\n                # The general assumption is that all response is available\n                # or this might block.  
On reading we do not use async\n                # receiving.  This generally works because latency in the\n                # network is low and redis is super quick in sending.  It\n                # does not make a lot of sense to complicate things here.\n                elif event in (\"read\", \"close\"):\n                    try:\n                        command_buffer.wait_for_responses(self)\n                    finally:\n                        self._release_command_buffer(command_buffer)\n\n        if self._cb_poll and timeout is not None:\n            raise TimeoutError(\"Did not receive all data in time.\")\n\n    def cancel(self):\n        \"\"\"Cancels all outstanding requests.\"\"\"\n        for command_buffer in self._cb_poll:\n            self._release_command_buffer(command_buffer)\n\n\nclass FanoutClient(MappingClient):\n    \"\"\"This works similar to the :class:`MappingClient` but instead of\n    using the router to target hosts, it sends the commands to all manually\n    specified hosts.\n\n    The results are accumulated in a dictionary keyed by the `host_id`.\n\n    For the parameters see :meth:`Cluster.fanout`.\n    \"\"\"\n\n    def __init__(self, hosts, connection_pool, max_concurrency=None, auto_batch=True):\n        MappingClient.__init__(\n            self, connection_pool, max_concurrency, auto_batch=auto_batch\n        )\n        self._target_hosts = hosts\n        self.__is_retargeted = False\n        self.__resolve_singular_result = False\n\n    def target(self, hosts):\n        \"\"\"Temporarily retarget the client for one call.  
This is useful\n        when having to deal with a subset of hosts for one call.\n        \"\"\"\n        if self.__is_retargeted:\n            raise TypeError(\"Cannot use target more than once.\")\n        rv = FanoutClient(\n            hosts,\n            connection_pool=self.connection_pool,\n            max_concurrency=self._max_concurrency,\n        )\n        rv._cb_poll = self._cb_poll\n        rv.__is_retargeted = True\n        return rv\n\n    def target_key(self, key):\n        \"\"\"Temporarily retarget the client for one call to route\n        specifically to the one host that the given key routes to.  In\n        that case the result on the promise is just the one host's value\n        instead of a dictionary.\n\n        .. versionadded:: 1.3\n        \"\"\"\n        router = self.connection_pool.cluster.get_router()\n        host_id = router.get_host_for_key(key)\n        rv = self.target([host_id])\n        rv.__resolve_singular_result = True\n        return rv\n\n    def execute_command(self, *args, **options):\n        promises = {}\n\n        hosts = self._target_hosts\n        if hosts == \"all\":\n            hosts = list(self.connection_pool.cluster.hosts.keys())\n        elif hosts is None:\n            raise RuntimeError(\"Fanout client was not targeted to hosts.\")\n\n        for host_id in hosts:\n            buf = self._get_command_buffer(host_id, args[0])\n            promise = buf.enqueue_command(args[0], args[1:], options)\n            if self.__resolve_singular_result and len(hosts) == 1:\n                return promise\n            promises[host_id] = promise\n\n        return Promise.all(promises)\n\n\nclass RoutingClient(RoutingBaseClient):\n    \"\"\"A client that can route to individual targets.\n\n    For the parameters see :meth:`Cluster.get_routing_client`.\n    \"\"\"\n\n    def __init__(self, cluster, auto_batch=True):\n        RoutingBaseClient.__init__(\n            self, connection_pool=RoutingPool(cluster), 
auto_batch=auto_batch\n        )\n\n    # Standard redis methods\n\n    def execute_command(self, *args, **options):\n        pool = self.connection_pool\n        command_name = args[0]\n        command_args = args[1:]\n        router = self.connection_pool.cluster.get_router()\n        host_id = router.get_host_for_command(command_name, command_args)\n        connection = pool.get_connection(command_name, shard_hint=host_id)\n        try:\n            connection.send_command(*args)\n            return self.parse_response(connection, command_name, **options)\n        except (ConnectionError, TimeoutError) as e:\n            connection.disconnect()\n            if not connection.retry_on_timeout and isinstance(e, TimeoutError):\n                raise\n            connection.send_command(*args)\n            return self.parse_response(connection, command_name, **options)\n        finally:\n            pool.release(connection)\n\n    # Custom Public API\n\n    def get_mapping_client(self, max_concurrency=64, auto_batch=None):\n        \"\"\"Returns a thread unsafe mapping client.  This client works\n        similar to a redis pipeline and returns eventual result objects.\n        It needs to be joined on to work properly.  
Instead of using this\n        directly you shold use the :meth:`map` context manager which\n        automatically joins.\n\n        Returns an instance of :class:`MappingClient`.\n        \"\"\"\n        if auto_batch is None:\n            auto_batch = self.auto_batch\n        return MappingClient(\n            connection_pool=self.connection_pool,\n            max_concurrency=max_concurrency,\n            auto_batch=auto_batch,\n        )\n\n    def get_fanout_client(self, hosts, max_concurrency=64, auto_batch=None):\n        \"\"\"Returns a thread unsafe fanout client.\n\n        Returns an instance of :class:`FanoutClient`.\n        \"\"\"\n        if auto_batch is None:\n            auto_batch = self.auto_batch\n        return FanoutClient(\n            hosts,\n            connection_pool=self.connection_pool,\n            max_concurrency=max_concurrency,\n            auto_batch=auto_batch,\n        )\n\n    def map(self, timeout=None, max_concurrency=64, auto_batch=None):\n        \"\"\"Returns a context manager for a map operation.  This runs\n        multiple queries in parallel and then joins in the end to collect\n        all results.\n\n        In the context manager the client available is a\n        :class:`MappingClient`.  Example usage::\n\n            results = {}\n            with cluster.map() as client:\n                for key in keys_to_fetch:\n                    results[key] = client.get(key)\n            for key, promise in results.iteritems():\n                print '%s => %s' % (key, promise.value)\n        \"\"\"\n        return MapManager(\n            self.get_mapping_client(max_concurrency, auto_batch), timeout=timeout\n        )\n\n    def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_batch=None):\n        \"\"\"Returns a context manager for a map operation that fans out to\n        manually specified hosts instead of using the routing system.  
This\n        can for instance be used to empty the database on all hosts.  The\n        context manager returns a :class:`FanoutClient`.  Example usage::\n\n            with cluster.fanout(hosts=[0, 1, 2, 3]) as client:\n                results = client.info()\n            for host_id, info in results.value.iteritems():\n                print '%s -> %s' % (host_id, info['is'])\n\n        The promise returned accumulates all results in a dictionary keyed\n        by the `host_id`.\n\n        The `hosts` parameter is a list of `host_id`\\s or alternatively the\n        string ``'all'`` to send the commands to all hosts.\n\n        The fanout APi needs to be used with a lot of care as it can cause\n        a lot of damage when keys are written to hosts that do not expect\n        them.\n        \"\"\"\n        return MapManager(\n            self.get_fanout_client(hosts, max_concurrency, auto_batch), timeout=timeout\n        )\n\n\nclass LocalClient(BaseClient):\n    \"\"\"The local client is just a convenient method to target one specific\n    host.\n    \"\"\"\n\n    def __init__(self, connection_pool=None, **kwargs):\n        if connection_pool is None:\n            raise TypeError(\"The local client needs a connection pool\")\n        BaseClient.__init__(self, connection_pool=connection_pool, **kwargs)\n\n\nclass MapManager(object):\n    \"\"\"Helps with mapping.\"\"\"\n\n    def __init__(self, mapping_client, timeout):\n        self.mapping_client = mapping_client\n        self.timeout = timeout\n        self.entered = None\n\n    def __enter__(self):\n        self.entered = time.time()\n        return self.mapping_client\n\n    def __exit__(self, exc_type, exc_value, tb):\n        if exc_type is not None:\n            self.mapping_client.cancel()\n        else:\n            timeout = self.timeout\n            if timeout is not None:\n                timeout = max(1, timeout - (time.time() - self.entered))\n            self.mapping_client.join(timeout=timeout)\n"
  },
  {
    "path": "rb/cluster.py",
    "content": "from redis.connection import ConnectionPool, UnixDomainSocketConnection\n\ntry:\n    from redis.commands.core import Script  # redis>=5\nexcept ImportError:\n    from redis.client import Script  # redis<5\n\ntry:\n    from redis.connection import SSLConnection\nexcept ImportError:\n    SSLConnection = None\n\nimport functools\nfrom hashlib import sha1\nfrom threading import Lock\n\nfrom rb.router import PartitionRouter\nfrom rb.clients import RoutingClient, LocalClient\nfrom rb.utils import integer_types, iteritems, itervalues\n\n\nclass HostInfo(object):\n    def __init__(\n        self,\n        host_id,\n        host,\n        port,\n        unix_socket_path=None,\n        db=0,\n        password=None,\n        ssl=False,\n        ssl_options=None,\n    ):\n        self.host_id = host_id\n        self.host = host\n        self.unix_socket_path = unix_socket_path\n        self.port = port\n        self.db = db\n        self.password = password\n        self.ssl = ssl\n        self.ssl_options = ssl_options\n\n    def __eq__(self, other):\n        if self.__class__ is not other.__class__:\n            return NotImplemented\n        return self.host_id == other.host_id\n\n    def __ne__(self, other):\n        rv = self.__eq__(other)\n        if rv is NotImplemented:\n            return NotImplemented\n        return not rv\n\n    def __hash__(self):\n        return self.host_id\n\n    def __repr__(self):\n        return \"<%s %s>\" % (\n            self.__class__.__name__,\n            \" \".join(\"%s=%r\" % x for x in sorted(self.__dict__.items())),\n        )\n\n\ndef _iter_hosts(iterable):\n    if isinstance(iterable, dict):\n        iterable = iteritems(iterable)\n    for item in iterable:\n        if isinstance(item, tuple):\n            host_id, cfg = item\n            cfg = dict(cfg)\n            cfg[\"host_id\"] = host_id\n        else:\n            cfg = item\n        yield cfg\n\n\nclass Cluster(object):\n    \"\"\"The cluster is the core 
object behind rb.  It holds the connection\n    pools to the individual nodes and can be shared for the duration of\n    the application in a central location.\n\n    Basic example of a cluster over four redis instances with the default\n    router::\n\n        cluster = Cluster(hosts={\n            0: {'port': 6379},\n            1: {'port': 6380},\n            2: {'port': 6381},\n            3: {'port': 6382},\n        }, host_defaults={\n            'host': '127.0.0.1',\n        })\n\n    `hosts` is a dictionary of hosts which maps the number host IDs to\n    configuration parameters.  The parameters correspond to the signature\n    of the :meth:`add_host` function.  The defaults for these parameters\n    are pulled from `host_defaults`.  To override the pool class the\n    `pool_cls` and `pool_options` parameters can be used.  The same\n    applies to `router_cls` and `router_options` for the router.  The pool\n    options are useful for setting socket timeouts and similar parameters.\n    \"\"\"\n\n    def __init__(\n        self,\n        hosts,\n        host_defaults=None,\n        pool_cls=None,\n        pool_options=None,\n        router_cls=None,\n        router_options=None,\n    ):\n        if pool_cls is None:\n            pool_cls = ConnectionPool\n        if router_cls is None:\n            router_cls = PartitionRouter\n        self._lock = Lock()\n        self.pool_cls = pool_cls\n        self.pool_options = pool_options\n        self.router_cls = router_cls\n        self.router_options = router_options\n        self._pools = {}\n        self._router = None\n        self.hosts = {}\n        self._hosts_age = 0\n        self.host_defaults = host_defaults or {}\n        for host_config in _iter_hosts(hosts):\n            if self.host_defaults:\n                for k, v in iteritems(self.host_defaults):\n                    host_config.setdefault(k, v)\n            self.add_host(**host_config)\n\n    def add_host(\n        self,\n        
host_id=None,\n        host=\"localhost\",\n        port=6379,\n        unix_socket_path=None,\n        db=0,\n        password=None,\n        ssl=False,\n        ssl_options=None,\n    ):\n        \"\"\"Adds a new host to the cluster.  This is only really useful for\n        unittests as normally hosts are added through the constructor and\n        changes after the cluster has been used for the first time are\n        unlikely to make sense.\n        \"\"\"\n        if host_id is None:\n            raise RuntimeError(\"Host ID is required\")\n        elif not isinstance(host_id, integer_types):\n            raise ValueError(\"The host ID has to be an integer\")\n        host_id = int(host_id)\n        with self._lock:\n            if host_id in self.hosts:\n                raise TypeError(\"Two hosts share the same host id (%r)\" % (host_id,))\n            self.hosts[host_id] = HostInfo(\n                host_id=host_id,\n                host=host,\n                port=port,\n                db=db,\n                unix_socket_path=unix_socket_path,\n                password=password,\n                ssl=ssl,\n                ssl_options=ssl_options,\n            )\n            self._hosts_age += 1\n\n    def remove_host(self, host_id):\n        \"\"\"Removes a host from the client.  This only really useful for\n        unittests.\n        \"\"\"\n        with self._lock:\n            rv = self._hosts.pop(host_id, None) is not None\n            pool = self._pools.pop(host_id, None)\n            if pool is not None:\n                pool.disconnect()\n            self._hosts_age += 1\n            return rv\n\n    def disconnect_pools(self):\n        \"\"\"Disconnects all connections from the internal pools.\"\"\"\n        with self._lock:\n            for pool in itervalues(self._pools):\n                pool.disconnect()\n            self._pools.clear()\n\n    def get_router(self):\n        \"\"\"Returns the router for the cluster.  
If the cluster reconfigures\n        the router will be recreated.  Usually you do not need to interface\n        with the router yourself as the cluster's routing client does that\n        automatically.\n\n        This returns an instance of :class:`BaseRouter`.\n        \"\"\"\n        cached_router = self._router\n        ref_age = self._hosts_age\n\n        if cached_router is not None:\n            router, router_age = cached_router\n            if router_age == ref_age:\n                return router\n\n        with self._lock:\n            router = self.router_cls(self, **(self.router_options or {}))\n            self._router = (router, ref_age)\n            return router\n\n    def get_pool_for_host(self, host_id):\n        \"\"\"Returns the connection pool for the given host.\n\n        This connection pool is used by the redis clients to make sure\n        that it does not have to reconnect constantly.  If you want to use\n        a custom redis client you can pass this in as connection pool\n        manually.\n        \"\"\"\n        if isinstance(host_id, HostInfo):\n            host_info = host_id\n            host_id = host_info.host_id\n        else:\n            host_info = self.hosts.get(host_id)\n            if host_info is None:\n                raise LookupError(\"Host %r does not exist\" % (host_id,))\n\n        rv = self._pools.get(host_id)\n        if rv is not None:\n            return rv\n        with self._lock:\n            rv = self._pools.get(host_id)\n            if rv is None:\n                opts = dict(self.pool_options or ())\n                opts[\"db\"] = host_info.db\n                opts[\"password\"] = host_info.password\n                if host_info.unix_socket_path is not None:\n                    opts[\"path\"] = host_info.unix_socket_path\n                    opts[\"connection_class\"] = UnixDomainSocketConnection\n                    if host_info.ssl:\n                        raise TypeError(\n                          
  \"SSL is not supported for unix \" \"domain sockets.\"\n                        )\n                else:\n                    opts[\"host\"] = host_info.host\n                    opts[\"port\"] = host_info.port\n                    if host_info.ssl:\n                        if SSLConnection is None:\n                            raise TypeError(\n                                \"This version of py-redis does \"\n                                \"not support SSL connections.\"\n                            )\n                        opts[\"connection_class\"] = SSLConnection\n                        opts.update(\n                            (\"ssl_\" + k, v)\n                            for k, v in iteritems(host_info.ssl_options or {})\n                        )\n                rv = self.pool_cls(**opts)\n                self._pools[host_id] = rv\n            return rv\n\n    def get_local_client(self, host_id):\n        \"\"\"Returns a localized client for a specific host ID.  This client\n        works like a regular Python redis client and returns results\n        immediately.\n        \"\"\"\n        return LocalClient(connection_pool=self.get_pool_for_host(host_id))\n\n    def get_local_client_for_key(self, key):\n        \"\"\"Similar to :meth:`get_local_client_for_key` but returns the\n        client based on what the router says the key destination is.\n        \"\"\"\n        return self.get_local_client(self.get_router().get_host_for_key(key))\n\n    def get_routing_client(self, auto_batch=True):\n        \"\"\"Returns a routing client.  This client is able to automatically\n        route the requests to the individual hosts.  It's thread safe and\n        can be used similar to the host local client but it will refused\n        to execute commands that cannot be directly routed to an\n        individual node.\n\n        The default behavior for the routing client is to attempt to batch\n        eligible commands into batch versions thereof.  
For instance multiple\n        `GET` commands routed to the same node can end up merged into an\n        `MGET` command.  This behavior can be disabled by setting `auto_batch`\n        to `False`.  This can be useful for debugging because `MONITOR` will\n        more accurately reflect the commands issued in code.\n\n        See :class:`RoutingClient` for more information.\n        \"\"\"\n        return RoutingClient(self, auto_batch=auto_batch)\n\n    def map(self, timeout=None, max_concurrency=64, auto_batch=True):\n        \"\"\"Shortcut context manager for getting a routing client, beginning\n        a map operation and joining over the result.  `max_concurrency`\n        defines how many outstanding parallel queries can exist before an\n        implicit join takes place.\n\n        In the context manager the client available is a\n        :class:`MappingClient`.  Example usage::\n\n            results = {}\n            with cluster.map() as client:\n                for key in keys_to_fetch:\n                    results[key] = client.get(key)\n            for key, promise in results.iteritems():\n                print '%s => %s' % (key, promise.value)\n        \"\"\"\n        return self.get_routing_client(auto_batch).map(\n            timeout=timeout, max_concurrency=max_concurrency\n        )\n\n    def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_batch=True):\n        \"\"\"Shortcut context manager for getting a routing client, beginning\n        a fanout operation and joining over the result.\n\n        In the context manager the client available is a\n        :class:`FanoutClient`.  
Example usage::\n\n            with cluster.fanout(hosts='all') as client:\n                client.flushdb()\n        \"\"\"\n        return self.get_routing_client(auto_batch).fanout(\n            hosts=hosts, timeout=timeout, max_concurrency=max_concurrency\n        )\n\n    def all(self, timeout=None, max_concurrency=64, auto_batch=True):\n        \"\"\"Fanout to all hosts.  Works otherwise exactly like :meth:`fanout`.\n\n        Example::\n\n            with cluster.all() as client:\n                client.flushdb()\n        \"\"\"\n        return self.fanout(\n            \"all\",\n            timeout=timeout,\n            max_concurrency=max_concurrency,\n            auto_batch=auto_batch,\n        )\n\n    def execute_commands(self, mapping, *args, **kwargs):\n        \"\"\"Concurrently executes a sequence of commands on a Redis cluster that\n        are associated with a routing key, returning a new mapping where\n        values are a list of results that correspond to the command in the same\n        position. For example::\n\n            >>> cluster.execute_commands({\n            ...   'foo': [\n            ...     ('PING',),\n            ...     ('TIME',),\n            ...   ],\n            ...   'bar': [\n            ...     ('CLIENT', 'GETNAME'),\n            ...   ],\n            ... })\n            {'bar': [<Promise None>],\n             'foo': [<Promise True>, <Promise (1454446079, 418404)>]}\n\n        Commands that are instances of :class:`redis.client.Script` will first\n        be checked for their existence on the target nodes then loaded on the\n        targets before executing and can be interleaved with other commands::\n\n            >>> from redis.client import Script\n            >>> TestScript = Script(None, 'return {KEYS, ARGV}')\n            >>> cluster.execute_commands({\n            ...   'foo': [\n            ...     (TestScript, ('key:1', 'key:2'), range(0, 3)),\n            ...   ],\n            ...   'bar': [\n            ...    
 (TestScript, ('key:3', 'key:4'), range(3, 6)),\n            ...   ],\n            ... })\n            {'bar': [<Promise [['key:3', 'key:4'], ['3', '4', '5']]>],\n             'foo': [<Promise [['key:1', 'key:2'], ['0', '1', '2']]>]}\n\n        Internally, :class:`FanoutClient` is used for issuing commands.\n        \"\"\"\n\n        def is_script_command(command):\n            return isinstance(command[0], Script)\n\n        def check_script_load_result(script, result):\n            if script.sha != result:\n                raise AssertionError(\n                    \"Hash mismatch loading {!r}: expected {!r}, got {!r}\".format(\n                        script, script.sha, result,\n                    )\n                )\n\n        # Run through all the commands and check to see if there are any\n        # scripts, and whether or not they have been loaded onto the target\n        # hosts.\n        exists = {}\n        with self.fanout(*args, **kwargs) as client:\n            for key, commands in mapping.items():\n                targeted = client.target_key(key)\n                for command in filter(is_script_command, commands):\n                    script = command[0]\n\n                    # Set the script hash if it hasn't already been set.\n                    if not script.sha:\n                        script.sha = sha1(script.script.encode(\"utf-8\")).hexdigest()\n\n                    # Check if the script has been loaded on each host that it\n                    # will be executed on.\n                    for host in targeted._target_hosts:\n                        if script not in exists.setdefault(host, {}):\n                            exists[host][script] = targeted.execute_command(\n                                \"SCRIPT EXISTS\", script.sha\n                            )\n\n        # Execute the pending commands, loading scripts onto servers where they\n        # do not already exist.\n        results = {}\n        with self.fanout(*args, 
**kwargs) as client:\n            for key, commands in mapping.items():\n                results[key] = []\n                targeted = client.target_key(key)\n                for command in commands:\n                    # If this command is a script, we need to check and see if\n                    # it needs to be loaded before execution.\n                    if is_script_command(command):\n                        script = command[0]\n                        for host in targeted._target_hosts:\n                            if script in exists[host]:\n                                result = exists[host].pop(script)\n                                if not result.value[0]:\n                                    targeted.execute_command(\n                                        \"SCRIPT LOAD\", script.script\n                                    ).done(\n                                        on_success=functools.partial(\n                                            check_script_load_result, script\n                                        )\n                                    )\n                        keys, arguments = command[1:]\n                        parameters = list(keys) + list(arguments)\n                        results[key].append(\n                            targeted.execute_command(\n                                \"EVALSHA\", script.sha, len(keys), *parameters\n                            )\n                        )\n                    else:\n                        results[key].append(targeted.execute_command(*command))\n\n        return results\n"
  },
  {
    "path": "rb/ketama.py",
    "content": "import hashlib\nimport math\n\nfrom bisect import bisect\n\nfrom rb.utils import text_type, integer_types, bytes_type\n\n\ndef md5_bytes(key):\n    if isinstance(key, text_type):\n        k = key.encode(\"utf-8\")\n    elif isinstance(key, integer_types):\n        k = text_type(key).encode(\"utf-8\")\n    else:\n        k = bytes_type(key)\n\n    return bytearray(hashlib.md5(k).digest())\n\n\nclass Ketama(object):\n    \"\"\"This class implements the Ketama consistent hashing algorithm.\n    \"\"\"\n\n    def __init__(self, nodes=None, weights=None):\n        self._nodes = set(nodes or [])\n        self._weights = weights if weights else {}\n\n        self._rebuild_circle()\n\n    def _rebuild_circle(self):\n        \"\"\"Updates the hash ring.\"\"\"\n        self._hashring = {}\n        self._sorted_keys = []\n        total_weight = 0\n        for node in self._nodes:\n            total_weight += self._weights.get(node, 1)\n\n        for node in self._nodes:\n            weight = self._weights.get(node, 1)\n\n            ks = math.floor((40 * len(self._nodes) * weight) / total_weight)\n\n            for i in range(0, int(ks)):\n                k = md5_bytes(\"%s-%s-salt\" % (node, i))\n\n                for l in range(0, 4):\n                    key = (\n                        (k[3 + l * 4] << 24)\n                        | (k[2 + l * 4] << 16)\n                        | (k[1 + l * 4] << 8)\n                        | k[l * 4]\n                    )\n                    self._hashring[key] = node\n                    self._sorted_keys.append(key)\n\n        self._sorted_keys.sort()\n\n    def _get_node_pos(self, key):\n        \"\"\"Return node position(integer) for a given key or None.\"\"\"\n        if not self._hashring:\n            return\n\n        k = md5_bytes(key)\n        key = (k[3] << 24) | (k[2] << 16) | (k[1] << 8) | k[0]\n\n        nodes = self._sorted_keys\n        pos = bisect(nodes, key)\n\n        if pos == len(nodes):\n          
  return 0\n        return pos\n\n    def remove_node(self, node):\n        \"\"\"Removes node from circle and rebuild it.\"\"\"\n        try:\n            self._nodes.remove(node)\n            del self._weights[node]\n        except (KeyError, ValueError):\n            pass\n        self._rebuild_circle()\n\n    def add_node(self, node, weight=1):\n        \"\"\"Adds node to circle and rebuild it.\"\"\"\n        self._nodes.add(node)\n        self._weights[node] = weight\n        self._rebuild_circle()\n\n    def get_node(self, key):\n        \"\"\"Return node for a given key. Else return None.\"\"\"\n        pos = self._get_node_pos(key)\n        if pos is None:\n            return None\n        return self._hashring[self._sorted_keys[pos]]\n"
  },
  {
    "path": "rb/poll.py",
    "content": "import fcntl\nimport array\nimport select\nimport termios\n\n\nclass BasePoller(object):\n    is_available = False\n\n    def __init__(self):\n        self.objects = {}\n\n    def register(self, key, f):\n        self.objects[key] = f\n\n    def unregister(self, key):\n        return self.objects.pop(key, None)\n\n    def poll(self, timeout=None):\n        raise NotImplementedError()\n\n    def get(self, key):\n        return self.objects.get(key)\n\n    def __len__(self):\n        return len(self.objects)\n\n    def __iter__(self):\n        # Make a copy when iterating so that modifications to this object\n        # are possible while we're going over it.\n        return iter(self.objects.values())\n\n\nclass SelectPoller(BasePoller):\n    is_available = hasattr(select, \"select\")\n\n    def poll(self, timeout=None):\n        objs = list(self.objects.values())\n        rlist, wlist, xlist = select.select(objs, objs, [], timeout)\n        if xlist:\n            raise RuntimeError(\"Got unexpected OOB data\")\n        return [(x, \"read\") for x in rlist] + [(x, \"write\") for x in wlist]\n\n\nclass PollPoller(BasePoller):\n    is_available = hasattr(select, \"poll\")\n\n    def __init__(self):\n        BasePoller.__init__(self)\n        self.pollobj = select.poll()\n        self.fd_to_object = {}\n\n    def register(self, key, f):\n        BasePoller.register(self, key, f)\n        self.pollobj.register(\n            f.fileno(), select.POLLIN | select.POLLOUT | select.POLLHUP\n        )\n        self.fd_to_object[f.fileno()] = f\n\n    def unregister(self, key):\n        rv = BasePoller.unregister(self, key)\n        if rv is not None:\n            self.pollobj.unregister(rv.fileno())\n            self.fd_to_object.pop(rv.fileno(), None)\n        return rv\n\n    def poll(self, timeout=None):\n        rv = []\n        for fd, event in self.pollobj.poll(timeout):\n            obj = self.fd_to_object[fd]\n            if event & select.POLLIN:\n       
         rv.append((obj, \"read\"))\n            if event & select.POLLOUT:\n                rv.append((obj, \"write\"))\n            if event & select.POLLHUP:\n                rv.append((obj, \"close\"))\n        return rv\n\n\nclass KQueuePoller(BasePoller):\n    is_available = hasattr(select, \"kqueue\")\n\n    def __init__(self):\n        BasePoller.__init__(self)\n        self.kqueue = select.kqueue()\n        self.events = []\n        self.event_to_object = {}\n\n    def register(self, key, f):\n        BasePoller.register(self, key, f)\n        r_event = select.kevent(\n            f.fileno(),\n            filter=select.KQ_FILTER_READ,\n            flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,\n        )\n        self.events.append(r_event)\n        w_event = select.kevent(\n            f.fileno(),\n            filter=select.KQ_FILTER_WRITE,\n            flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,\n        )\n        self.events.append(w_event)\n        self.event_to_object[f.fileno()] = f\n\n    def unregister(self, key):\n        rv = BasePoller.unregister(self, key)\n        if rv is not None:\n            fd = rv.fileno()\n            self.events = [x for x in self.events if x.ident != fd]\n            self.event_to_object.pop(fd, None)\n        return rv\n\n    def poll(self, timeout=None):\n        events = self.kqueue.control(self.events, 128, timeout)\n        rv = []\n        for ev in events:\n            obj = self.event_to_object.get(ev.ident)\n            if obj is None:\n                # It happens surprisingly frequently that kqueue returns\n                # write events things no longer in the kqueue.  
Not sure\n                # why\n                continue\n            if ev.filter == select.KQ_FILTER_READ:\n                rv.append((obj, \"read\"))\n            elif ev.filter == select.KQ_FILTER_WRITE:\n                rv.append((obj, \"write\"))\n            if ev.flags & select.KQ_EV_EOF:\n                rv.append((obj, \"close\"))\n        return rv\n\n\nclass EpollPoller(BasePoller):\n    is_available = hasattr(select, \"epoll\")\n\n    def __init__(self):\n        BasePoller.__init__(self)\n        self.epoll = select.epoll()\n        self.fd_to_object = {}\n\n    def register(self, key, f):\n        BasePoller.register(self, key, f)\n        self.epoll.register(\n            f.fileno(), select.EPOLLIN | select.EPOLLHUP | select.EPOLLOUT\n        )\n        self.fd_to_object[f.fileno()] = f\n\n    def unregister(self, key):\n        rv = BasePoller.unregister(self, key)\n        if rv is not None:\n            self.epoll.unregister(rv.fileno())\n            self.fd_to_object.pop(rv.fileno(), None)\n        return rv\n\n    def poll(self, timeout=None):\n        if timeout is None:\n            timeout = -1\n        rv = []\n        for fd, event in self.epoll.poll(timeout):\n            obj = self.fd_to_object[fd]\n            if event & select.EPOLLIN:\n                rv.append((obj, \"read\"))\n            if event & select.EPOLLOUT:\n                rv.append((obj, \"write\"))\n            if event & select.EPOLLHUP:\n                rv.append((obj, \"close\"))\n        return rv\n\n\ndef _is_closed_select(f):\n    rlist, wlist, _ = select.select([f], [f], [], 0.0)\n    if not rlist and not wlist:\n        return False\n    buf = array.array(\"i\", [0])\n    fcntl.ioctl(f.fileno(), termios.FIONREAD, buf)\n    return buf[0] == 0\n\n\ndef _is_closed_poll(f):\n    poll = select.poll()\n    poll.register(f.fileno(), select.POLLHUP)\n    for _, event in poll.poll(0.0):\n        if event == \"close\":\n            return True\n    return False\n\n\ndef 
_is_closed_kqueue(f):\n    kqueue = select.kqueue()\n    event = select.kevent(\n        f.fileno(),\n        filter=select.KQ_FILTER_READ,\n        flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,\n    )\n    for event in kqueue.control([event], 128, 0.0):\n        if event.flags & select.KQ_EV_EOF:\n            return True\n    return False\n\n\ndef is_closed(f):\n    if KQueuePoller.is_available:\n        return _is_closed_kqueue(f)\n    if PollPoller.is_available:\n        return _is_closed_poll(f)\n    return _is_closed_select(f)\n\n\navailable_pollers = [\n    poll\n    for poll in [KQueuePoller, PollPoller, EpollPoller, SelectPoller]\n    if poll.is_available\n]\npoll = available_pollers[0]\n"
  },
  {
    "path": "rb/promise.py",
    "content": "from functools import partial\n\nfrom rb.utils import iteritems\n\n\nclass Promise(object):\n    \"\"\"A promise object that attempts to mirror the ES6 APIs for promise\n    objects.  Unlike ES6 promises this one however also directly gives\n    access to the underlying value and it has some slightly different\n    static method names as this promise can be resolved externally.\n    \"\"\"\n\n    __slots__ = (\"value\", \"reason\", \"_state\", \"_callbacks\", \"_errbacks\")\n\n    def __init__(self):\n        #: the value that this promise holds if it's resolved.\n        self.value = None\n        #: the reason for this promise if it's rejected.\n        self.reason = None\n        self._state = \"pending\"\n        self._callbacks = []\n        self._errbacks = []\n\n    @staticmethod\n    def resolved(value):\n        \"\"\"Creates a promise object resolved with a certain value.\"\"\"\n        p = Promise()\n        p._state = \"resolved\"\n        p.value = value\n        return p\n\n    @staticmethod\n    def rejected(reason):\n        \"\"\"Creates a promise object rejected with a certain value.\"\"\"\n        p = Promise()\n        p._state = \"rejected\"\n        p.reason = reason\n        return p\n\n    @staticmethod\n    def all(iterable_or_dict):\n        \"\"\"A promise that resolves when all passed promises resolve.  
You can\n        either pass a list or a dictionary of promises.\n        \"\"\"\n        if isinstance(iterable_or_dict, dict):\n            return _promise_from_dict(iterable_or_dict)\n        return _promise_from_iterable(iterable_or_dict)\n\n    def resolve(self, value):\n        \"\"\"Resolves the promise with the given value.\"\"\"\n        if self is value:\n            raise TypeError(\"Cannot resolve promise with itself.\")\n\n        if isinstance(value, Promise):\n            value.done(self.resolve, self.reject)\n            return\n\n        if self._state != \"pending\":\n            raise RuntimeError(\"Promise is no longer pending.\")\n\n        self.value = value\n        self._state = \"resolved\"\n        callbacks = self._callbacks\n        self._callbacks = None\n        for callback in callbacks:\n            callback(value)\n\n    def reject(self, reason):\n        \"\"\"Rejects the promise with the given reason.\"\"\"\n        if self._state != \"pending\":\n            raise RuntimeError(\"Promise is no longer pending.\")\n\n        self.reason = reason\n        self._state = \"rejected\"\n        errbacks = self._errbacks\n        self._errbacks = None\n        for errback in errbacks:\n            errback(reason)\n\n    @property\n    def is_pending(self):\n        \"\"\"`True` if the promise is still pending, `False` otherwise.\"\"\"\n        return self._state == \"pending\"\n\n    @property\n    def is_resolved(self):\n        \"\"\"`True` if the promise was resolved, `False` otherwise.\"\"\"\n        return self._state == \"resolved\"\n\n    @property\n    def is_rejected(self):\n        \"\"\"`True` if the promise was rejected, `False` otherwise.\"\"\"\n        return self._state == \"rejected\"\n\n    def done(self, on_success=None, on_failure=None):\n        \"\"\"Attaches some callbacks to the promise and returns the promise.\"\"\"\n        if on_success is not None:\n            if self._state == \"pending\":\n                
self._callbacks.append(on_success)\n            elif self._state == \"resolved\":\n                on_success(self.value)\n        if on_failure is not None:\n            if self._state == \"pending\":\n                self._errbacks.append(on_failure)\n            elif self._state == \"rejected\":\n                on_failure(self.reason)\n        return self\n\n    def then(self, success=None, failure=None):\n        \"\"\"A utility method to add success and/or failure callback to the\n        promise which will also return another promise in the process.\n        \"\"\"\n        rv = Promise()\n\n        def on_success(v):\n            try:\n                rv.resolve(success(v))\n            except Exception as e:\n                rv.reject(e)\n\n        def on_failure(r):\n            try:\n                rv.resolve(failure(r))\n            except Exception as e:\n                rv.reject(e)\n\n        self.done(on_success, on_failure)\n        return rv\n\n    def __repr__(self):\n        if self._state == \"pending\":\n            v = \"(pending)\"\n        elif self._state == \"rejected\":\n            v = repr(self.reason) + \" (rejected)\"\n        else:\n            v = repr(self.value)\n        return \"<%s %s>\" % (self.__class__.__name__, v,)\n\n\ndef _ensure_promise(value):\n    return value if isinstance(value, Promise) else Promise.resolved(value)\n\n\ndef _promise_from_iterable(iterable):\n    l = [_ensure_promise(x) for x in iterable]\n    if not l:\n        return Promise.resolved([])\n\n    pending = set(l)\n    rv = Promise()\n\n    def on_success(promise, value):\n        pending.discard(promise)\n        if not pending:\n            rv.resolve([p.value for p in l])\n\n    for promise in l:\n        promise.done(partial(on_success, promise), rv.reject)\n\n    return rv\n\n\ndef _promise_from_dict(d):\n    d = dict((k, _ensure_promise(v)) for k, v in iteritems(d))\n    if not d:\n        return Promise.resolved({})\n\n    pending = 
set(d.keys())\n    rv = Promise()\n\n    def on_success(key, value):\n        pending.discard(key)\n        if not pending:\n            rv.resolve(dict((k, p.value) for k, p in iteritems(d)))\n\n    for key, promise in iteritems(d):\n        promise.done(partial(on_success, key), rv.reject)\n\n    return rv\n"
  },
  {
    "path": "rb/router.py",
    "content": "from weakref import ref as weakref\n\nfrom rb.ketama import Ketama\nfrom rb.utils import text_type, bytes_type, integer_types, crc32\nfrom rb._rediscommands import COMMANDS\n\n\nclass UnroutableCommand(Exception):\n    \"\"\"Raised if a command was issued that cannot be routed through the\n    router to a single host.\n    \"\"\"\n\n\nclass BadHostSetup(Exception):\n    \"\"\"Raised if the cluster's host setup is not compatible with the\n    router.\n    \"\"\"\n\n\ndef extract_keys(args, key_spec):\n    first, last, step = key_spec\n\n    rv = []\n    for idx, arg in enumerate(args, 1):\n        if last >= 0 and idx > last:\n            break\n        if idx >= first and ((idx - first) % step) == 0:\n            rv.append(arg)\n    return rv\n\n\ndef assert_gapless_hosts(hosts):\n    if not hosts:\n        raise BadHostSetup(\"No hosts were configured.\")\n    for x in range(len(hosts)):\n        if hosts.get(x) is None:\n            raise BadHostSetup(\n                'Expected host with ID \"%d\" but no such ' \"host was found.\" % x\n            )\n\n\nclass BaseRouter(object):\n    \"\"\"Baseclass for all routers.  
If you want to implement a custom router\n    this is what you subclass.\n    \"\"\"\n\n    def __init__(self, cluster):\n        # this is a weakref because the router is cached on the cluster\n        # and otherwise we end up in circular reference land and we are\n        # having problems being garbage collected.\n        self._cluster = weakref(cluster)\n\n    @property\n    def cluster(self):\n        \"\"\"Reference back to the :class:`Cluster` this router belongs to.\"\"\"\n        rv = self._cluster()\n        if rv is None:\n            raise RuntimeError(\"Cluster went away\")\n        return rv\n\n    def get_key(self, command, args):\n        \"\"\"Returns the key a command operates on.\"\"\"\n        spec = COMMANDS.get(command.upper())\n\n        if spec is None:\n            raise UnroutableCommand(\n                'The command \"%r\" is unknown to the '\n                \"router and cannot be handled as a \"\n                \"result.\" % command\n            )\n\n        if \"movablekeys\" in spec[\"flags\"]:\n            raise UnroutableCommand(\n                'The keys for \"%r\" are movable and '\n                \"as such cannot be routed to a single \"\n                \"host.\"\n            )\n\n        keys = extract_keys(args, spec[\"key_spec\"])\n        if len(keys) == 1:\n            return keys[0]\n        elif not keys:\n            raise UnroutableCommand(\n                'The command \"%r\" does not operate on a key which means '\n                \"that no suitable host could be determined.  
Consider \"\n                \"using a fanout instead.\"\n            )\n\n        raise UnroutableCommand(\n            'The command \"%r\" operates on multiple keys (%d passed) which is '\n            \"something that is not supported.\" % (command, len(keys))\n        )\n\n    def get_host_for_command(self, command, args):\n        \"\"\"Returns the host this command should be executed against.\"\"\"\n        return self.get_host_for_key(self.get_key(command, args))\n\n    def get_host_for_key(self, key):\n        \"\"\"Perform routing and return host_id of the target.\n\n        Subclasses need to implement this.\n        \"\"\"\n        raise NotImplementedError()\n\n\nclass ConsistentHashingRouter(BaseRouter):\n    \"\"\"Router that returns the host_id based on a consistent hashing\n    algorithm.  The consistent hashing algorithm only works if a key\n    argument is provided.\n\n    This router requires that the hosts are gapless which means that\n    the IDs for N hosts range from 0 to N-1.\n    \"\"\"\n\n    def __init__(self, cluster):\n        BaseRouter.__init__(self, cluster)\n        self._host_id_id_map = dict(self.cluster.hosts.items())\n        self._hash = Ketama(self._host_id_id_map.values())\n        assert_gapless_hosts(self.cluster.hosts)\n\n    def get_host_for_key(self, key):\n        rv = self._hash.get_node(key)\n        if rv is None:\n            raise UnroutableCommand(\"Did not find a suitable \" \"host for the key.\")\n        return rv\n\n\nclass PartitionRouter(BaseRouter):\n    \"\"\"A straightforward router that just individually routes commands to\n    single nodes based on a simple ``crc32 % node_count`` setup.\n\n    This router requires that the hosts are gapless which means that\n    the IDs for N hosts range from 0 to N-1.\n    \"\"\"\n\n    def __init__(self, cluster):\n        BaseRouter.__init__(self, cluster)\n        assert_gapless_hosts(self.cluster.hosts)\n\n    def get_host_for_key(self, key):\n        if 
isinstance(key, text_type):\n            k = key.encode(\"utf-8\")\n        elif isinstance(key, integer_types):\n            k = text_type(key).encode(\"utf-8\")\n        else:\n            k = bytes_type(key)\n        return crc32(k) % len(self.cluster.hosts)\n"
  },
  {
    "path": "rb/testing.py",
    "content": "import os\nimport time\nimport uuid\nimport shutil\nimport socket\nimport tempfile\n\nfrom contextlib import contextmanager\nfrom subprocess import Popen, PIPE\n\nfrom rb.cluster import Cluster\nfrom rb.utils import itervalues\n\ndevnull = open(os.devnull, \"r+\")\n\n\nclass Server(object):\n    def __init__(self, cl, socket_path):\n        self._cl = cl\n        self.socket_path = socket_path\n\n    def test_connection(self):\n        try:\n            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n            s.connect(self.socket_path)\n        except IOError:\n            return False\n        return True\n\n    def signal_stop(self):\n        if self._cl is not None:\n            self._cl.kill()\n\n    def close(self):\n        if self._cl is not None:\n            self.signal_stop()\n            self._cl.wait()\n            self._cl = None\n        try:\n            os.remove(self.socket_path)\n        except OSError:\n            pass\n\n\nclass TestSetup(object):\n    \"\"\"The test setup is a convenient way to spawn multiple redis servers\n    for testing and to shut them down automatically.  This can be used as\n    a context manager to automatically terminate the clients.\n    \"\"\"\n\n    def __init__(self, servers=4, databases_each=8, server_executable=\"redis-server\"):\n        self._fd_dir = tempfile.mkdtemp()\n        self.databases_each = databases_each\n        self.server_executable = server_executable\n        self.servers = []\n\n        for server in range(servers):\n            self.spawn_server()\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_value, tb):\n        self.close()\n\n    def make_cluster(self):\n        \"\"\"Creates a correctly configured cluster from the servers\n        spawned.  
This also automatically waits for the servers to be up.\n        \"\"\"\n        self.wait_for_servers()\n        hosts = []\n        host_id = 0\n        for server in self.servers:\n            for x in range(self.databases_each):\n                hosts.append(\n                    {\n                        \"host_id\": host_id,\n                        \"unix_socket_path\": server.socket_path,\n                        \"db\": x,\n                    }\n                )\n                host_id += 1\n        return Cluster(\n            hosts, pool_options={\"encoding\": \"utf-8\", \"decode_responses\": True}\n        )\n\n    def spawn_server(self):\n        \"\"\"Spawns a new server and adds it to the pool.\"\"\"\n        socket_path = os.path.join(self._fd_dir, str(uuid.uuid4()))\n        cl = Popen([self.server_executable, \"-\"], stdin=PIPE, stdout=devnull)\n        cl.stdin.write(\n            (\n                \"\"\"\n        port 0\n        unixsocket %(path)s\n        databases %(databases)d\n        save \"\"\n        \"\"\"\n                % {\"path\": socket_path, \"databases\": self.databases_each,}\n            ).encode(\"utf-8\")\n        )\n        cl.stdin.flush()\n        cl.stdin.close()\n        self.servers.append(Server(cl, socket_path))\n\n    def wait_for_servers(self, timeout=10):\n        \"\"\"Waits for all servers to to be up and running.\"\"\"\n        unconnected_servers = dict((x.socket_path, x) for x in self.servers)\n        now = time.time()\n        while unconnected_servers:\n            for server in itervalues(unconnected_servers):\n                if server.test_connection():\n                    unconnected_servers.pop(server.socket_path, None)\n                    break\n            if time.time() > now + timeout:\n                return False\n            if unconnected_servers:\n                time.sleep(0.05)\n\n        return True\n\n    def close(self):\n        \"\"\"Closes the test setup which shuts down all 
redis servers.\"\"\"\n        for server in self.servers:\n            server.signal_stop()\n        for server in self.servers:\n            server.close()\n        try:\n            shutil.rmtree(self._fd_dir)\n        except (OSError, IOError):\n            pass\n\n    def __del__(self):\n        try:\n            self.close()\n        except Exception:\n            pass\n\n\n@contextmanager\ndef make_test_cluster(*args, **kwargs):\n    \"\"\"Convenient shortcut for creating a test setup and then a cluster\n    from it.  This must be used as a context manager::\n\n        from rb.testing import make_test_cluster\n        with make_test_cluster() as cluster:\n            ...\n    \"\"\"\n    with TestSetup(*args, **kwargs) as ts:\n        cluster = ts.make_cluster()\n        try:\n            yield cluster\n        finally:\n            cluster.disconnect_pools()\n"
  },
  {
    "path": "rb/utils.py",
    "content": "from __future__ import absolute_import\n\nimport sys\n\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n    integer_types = (int, long)\n    text_type = unicode\n    bytes_type = str\n\n    def iteritems(d, **kw):\n        return iter(d.iteritems(**kw))\n\n    def itervalues(d, **kw):\n        return iter(d.itervalues(**kw))\n\n    from itertools import izip\n\n    from binascii import crc32\nelse:\n    integer_types = (int,)\n    text_type = str\n    bytes_type = bytes\n\n    izip = zip\n\n    def iteritems(d, **kw):\n        return iter(d.items(**kw))\n\n    def itervalues(d, **kw):\n        return iter(d.values(**kw))\n\n    from binascii import crc32 as _crc32\n\n    # In python3 crc32 was changed to never return a signed value, which is\n    # different from the python2 implementation. As noted in\n    # https://docs.python.org/3/library/binascii.html#binascii.crc32\n    #\n    # Note the documentation suggests the following:\n    #\n    # > Changed in version 3.0: The result is always unsigned. To generate the\n    # > same numeric value across all Python versions and platforms, use\n    # > crc32(data) & 0xffffffff.\n    #\n    # However this will not work when transitioning between versions, as the\n    # value MUST match what was generated in python 2.\n    #\n    # We can sign the return value using the following bit math to ensure we\n    # match the python2 output of crc32.\n    def crc32(*args):\n        rt = _crc32(*args)\n        return rt - ((rt & 0x80000000) << 1)\n"
  },
  {
    "path": "scripts/bump-version.sh",
    "content": "#!/bin/bash\nset -eu\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\ncd $SCRIPT_DIR/..\n\nOLD_VERSION=\"$1\"\nNEW_VERSION=\"$2\"\n\nsed -i -e \"s/^__version__ = \"'\".*\"'\"\\$/__version__ = \"'\"'\"$NEW_VERSION\"'\"'\"/\" rb/__init__.py\n\necho \"New version: $NEW_VERSION\"\n"
  },
  {
    "path": "setup.cfg",
    "content": "[bdist_wheel]\nuniversal = 1\n"
  },
  {
    "path": "setup.py",
    "content": "import re\nimport ast\nimport os\nfrom setuptools import setup\n\n\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n\n\nwith open(\"rb/__init__.py\", \"rb\") as f:\n    version = str(\n        ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1))\n    )\n\ninstall_requires = [\"redis>=2.6,!=3.4.0\"]\n\n# override the redis requirement in install_requires if REDIS_VERSION is set\nREDIS_VERSION = os.environ.get('REDIS_VERSION')\nif REDIS_VERSION:\n    install_requires = [\n        u'redis{}'.format(REDIS_VERSION)\n        if r.startswith('redis>=') else r\n        for r in install_requires\n    ]\n\n\nsetup(\n    name=\"rb\",\n    author=\"Functional Software Inc.\",\n    author_email=\"hello@getsentry.com\",\n    version=version,\n    url=\"http://github.com/getsentry/rb\",\n    packages=[\"rb\"],\n    description=\"rb, the redis blaster\",\n    install_requires=install_requires,\n    classifiers=[\n        \"License :: OSI Approved :: Apache Software License\",\n        \"Programming Language :: Python\",\n    ],\n)\n"
  },
  {
    "path": "tests/conftest.py",
    "content": "import pytest\n\nfrom rb.testing import make_test_cluster\n\n\n@pytest.fixture\ndef cluster(request):\n    mgr = make_test_cluster()\n    cluster = mgr.__enter__()\n\n    @request.addfinalizer\n    def cleanup():\n        mgr.__exit__(None, None, None)\n\n    return cluster\n"
  },
  {
    "path": "tests/test_cluster.py",
    "content": "import time\nimport pytest\n\nimport redis\nfrom redis.exceptions import ResponseError\n\nfrom rb.cluster import Cluster\nfrom rb.router import UnroutableCommand\nfrom rb.promise import Promise\nfrom rb.utils import text_type\n\ntry:\n    from redis.commands.core import Script\nexcept ImportError:\n    from redis.client import Script\n\n\ndef test_basic_interface():\n    cluster = Cluster(\n        {0: {\"db\": 0}, 1: {\"db\": 2}, 2: {\"db\": 4, \"host\": \"127.0.0.1\"},},\n        host_defaults={\"password\": \"pass\",},\n        pool_options={\"encoding\": \"utf-8\", \"decode_responses\": True},\n    )\n\n    assert len(cluster.hosts) == 3\n\n    assert cluster.hosts[0].host_id == 0\n    assert cluster.hosts[0].db == 0\n    assert cluster.hosts[0].host == \"localhost\"\n    assert cluster.hosts[0].port == 6379\n    assert cluster.hosts[0].password == \"pass\"\n\n    assert cluster.hosts[1].host_id == 1\n    assert cluster.hosts[1].db == 2\n    assert cluster.hosts[1].host == \"localhost\"\n    assert cluster.hosts[1].port == 6379\n    assert cluster.hosts[1].password == \"pass\"\n\n    assert cluster.hosts[2].host_id == 2\n    assert cluster.hosts[2].db == 4\n    assert cluster.hosts[2].host == \"127.0.0.1\"\n    assert cluster.hosts[2].port == 6379\n    assert cluster.hosts[2].password == \"pass\"\n\n\ndef test_router_access():\n    cluster = Cluster(\n        {0: {\"db\": 0},}, pool_options={\"encoding\": \"utf-8\", \"decode_responses\": True}\n    )\n\n    router = cluster.get_router()\n    assert router.cluster is cluster\n    assert cluster.get_router() is router\n\n    cluster.add_host(1, {\"db\": 1})\n    new_router = cluster.get_router()\n    assert new_router is not router\n\n\ndef test_basic_cluster(cluster):\n    iterations = 10000\n\n    with cluster.map() as client:\n        for x in range(iterations):\n            client.set(\"key-%06d\" % x, x)\n    responses = []\n    with cluster.map() as client:\n        for x in 
range(iterations):\n            responses.append(client.get(\"key-%06d\" % x))\n    ref_sum = sum(int(x.value) for x in responses)\n    assert ref_sum == sum(range(iterations))\n\n\ndef test_basic_cluster_disabled_batch(cluster):\n    iterations = 10000\n\n    with cluster.map(auto_batch=False) as client:\n        for x in range(iterations):\n            client.set(\"key-%06d\" % x, x)\n    responses = []\n    with cluster.map(auto_batch=False) as client:\n        for x in range(iterations):\n            responses.append(client.get(\"key-%06d\" % x))\n    ref_sum = sum(int(x.value) for x in responses)\n    assert ref_sum == sum(range(iterations))\n\n\ndef make_zset_data(x):\n    return [(str(i), float(i)) for i in range(x, x + 10)]\n\n\ndef test_simple_api(cluster):\n    client = cluster.get_routing_client()\n    with client.map() as map_client:\n        for x in range(10):\n            map_client.set(\"key:%d\" % x, x)\n            if redis.VERSION >= (3, 0, 0):\n                map_client.zadd(\"zset:%d\" % x, dict(make_zset_data(x)))\n            else:\n                map_client.zadd(\"zset:%d\" % x, **dict(make_zset_data(x)))\n\n    for x in range(10):\n        assert client.get(\"key:%d\" % x) == str(x)\n        assert client.zrange(\"zset:%d\" % x, 0, -1, withscores=True) == make_zset_data(x)\n\n    results = []  # (promise, expected result)\n    with client.map() as map_client:\n        for x in range(10):\n            results.append(\n                (\n                    map_client.zrange(\"zset:%d\" % x, 0, -1, withscores=True),\n                    make_zset_data(x),\n                )\n            )\n\n    for promise, expectation in results:\n        assert promise.value == expectation\n\n    with client.map() as map_client:\n        for x in range(10):\n            map_client.delete(\"key:%d\" % x)\n\n    for x in range(10):\n        assert client.get(\"key:%d\" % x) is None\n\n\ndef test_routing_client_releases_connection_on_error(cluster):\n    
client = cluster.get_routing_client()\n    with pytest.raises(ResponseError):\n        client.sadd(\"key\")\n\n    host = cluster.get_router().get_host_for_command(\"sadd\", [\"key\"])\n    pool = cluster.get_pool_for_host(host)\n    assert len(pool._available_connections) == pool._created_connections\n\n\ndef test_mapping_client_releases_connection_on_error(cluster):\n    client = cluster.get_routing_client().get_mapping_client()\n    client.sadd(\"key\")\n    with pytest.raises(ResponseError):\n        client.join()\n\n    host = cluster.get_router().get_host_for_command(\"sadd\", [\"key\"])\n    pool = cluster.get_pool_for_host(host)\n    assert len(pool._available_connections) == pool._created_connections\n\n\ndef test_managed_mapping_client_releases_connection_on_error(cluster):\n    with pytest.raises(ResponseError):\n        with cluster.get_routing_client().map() as client:\n            client.sadd(\"key\")\n\n    host = cluster.get_router().get_host_for_command(\"sadd\", [\"key\"])\n    pool = cluster.get_pool_for_host(host)\n    assert len(pool._available_connections) == pool._created_connections\n\n\ndef test_multi_keys_rejected(cluster):\n    client = cluster.get_routing_client()\n\n    # Okay\n    with client.map() as map_client:\n        map_client.delete(\"key\")\n\n    # Not okay\n    with client.map() as map_client:\n        with pytest.raises(UnroutableCommand):\n            map_client.delete(\"key1\", \"key2\")\n\n\ndef test_promise_api(cluster):\n    results = []\n    with cluster.map() as client:\n        for x in range(10):\n            client.set(\"key-%d\" % x, x)\n        for x in range(10):\n            client.get(\"key-%d\" % x).then(lambda x: results.append(int(x)))\n    assert sorted(results) == list(range(10))\n\n\ndef test_fanout_api(cluster):\n    for host_id in cluster.hosts:\n        client = cluster.get_local_client(host_id)\n        client.set(\"foo\", str(host_id))\n        if redis.VERSION >= (3, 0, 0):\n            
client.zadd(\"zset\", dict(make_zset_data(host_id)))\n        else:\n            client.zadd(\"zset\", **dict(make_zset_data(host_id)))\n\n    with cluster.fanout(hosts=\"all\") as client:\n        get_result = client.get(\"foo\")\n        zrange_result = client.zrange(\"zset\", 0, -1, withscores=True)\n\n    for host_id in cluster.hosts:\n        assert get_result.value[host_id] == str(host_id)\n        assert zrange_result.value[host_id] == make_zset_data(host_id)\n\n\ndef test_fanout_key_target(cluster):\n    with cluster.fanout() as client:\n        c = client.target_key(\"foo\")\n        c.set(\"foo\", \"42\")\n        promise = c.get(\"foo\")\n    assert promise.value == \"42\"\n\n    client = cluster.get_routing_client()\n    assert client.get(\"foo\") == \"42\"\n\n\ndef test_fanout_targeting_api(cluster):\n    with cluster.fanout() as client:\n        client.target(hosts=[0, 1]).set(\"foo\", 42)\n        rv = client.target(hosts=\"all\").get(\"foo\")\n\n    assert list(rv.value.values()).count(\"42\") == 2\n\n    # Without hosts this should fail\n    with cluster.fanout() as client:\n        pytest.raises(RuntimeError, client.get, \"bar\")\n\n\ndef test_emulated_batch_apis(cluster):\n    with cluster.map() as map_client:\n        promise = map_client.mset(dict((\"key:%s\" % x, x) for x in range(10)))\n    assert promise.value is None\n    with cluster.map() as map_client:\n        promise = map_client.mget([\"key:%s\" % x for x in range(10)])\n    assert promise.value == list(map(text_type, range(10)))\n\n\ndef test_batch_promise_all(cluster):\n    with cluster.map() as client:\n        client.set(\"1\", \"a\")\n        client.set(\"2\", \"b\")\n        client.set(\"3\", \"c\")\n        client.set(\"4\", \"d\")\n        client.hset(\"a\", \"b\", \"XXX\")\n\n    with cluster.map() as client:\n        rv = Promise.all(\n            [client.mget(\"1\", \"2\"), client.hget(\"a\", \"b\"), client.mget(\"3\", \"4\"),]\n        )\n    assert rv.value == [[\"a\", 
\"b\"], \"XXX\", [\"c\", \"d\"]]\n\n\ndef test_execute_commands(cluster):\n    TestScript = Script(cluster.get_local_client(0), \"return {KEYS, ARGV}\",)\n\n    # XXX: redis<2.10.6 didn't require that a ``Script`` be instantiated with a\n    # valid client as part of the constructor, which resulted in the SHA not\n    # actually being set until the script was executed. To ensure the legacy\n    # behavior still works, we manually unset the cached SHA before executing.\n    actual_script_hash = TestScript.sha\n    TestScript.sha = None\n\n    results = cluster.execute_commands(\n        {\n            \"foo\": [\n                (\"SET\", \"foo\", \"1\"),\n                (TestScript, (\"key\",), (\"value\",)),\n                (\"GET\", \"foo\"),\n            ],\n            \"bar\": [\n                (\"INCRBY\", \"bar\", \"2\"),\n                (TestScript, (\"key\",), (\"value\",)),\n                (\"GET\", \"bar\"),\n            ],\n        }\n    )\n\n    assert TestScript.sha == actual_script_hash\n\n    assert results[\"foo\"][0].value\n    assert results[\"foo\"][1].value == [[\"key\"], [\"value\"]]\n    assert results[\"foo\"][2].value == \"1\"\n\n    assert results[\"bar\"][0].value == 2\n    assert results[\"bar\"][1].value == [[\"key\"], [\"value\"]]\n    assert results[\"bar\"][2].value == \"2\"\n\n\ndef test_reconnect(cluster):\n    with cluster.map() as client:\n        for x in range(10):\n            client.set(text_type(x), text_type(x))\n\n    with cluster.all() as client:\n        client.config_set(\"timeout\", 1)\n\n    time.sleep(2)\n\n    with cluster.map() as client:\n        rv = Promise.all([client.get(text_type(x)) for x in range(10)])\n\n    assert rv.value == list(map(text_type, range(10)))\n"
  },
  {
    "path": "tests/test_ketama.py",
    "content": "from rb.ketama import Ketama\n\n\ndef test_basic():\n    def test(k):\n        data = {}\n        for i in range(1000):\n            tower = k.get_node(\"a%s\" % i)\n            data.setdefault(tower, 0)\n            data[tower] += 1\n\n        return [\n            k.get_node(\"Apple\"),\n            k.get_node(\"Hello\"),\n            k.get_node(\"Data\"),\n            k.get_node(\"Computer\"),\n        ]\n\n    k = Ketama(\n        [\n            \"192.168.0.1:6000\",\n            \"192.168.0.1:6001\",\n            \"192.168.0.1:6002\",\n            \"192.168.0.1:6003\",\n            \"192.168.0.1:6004\",\n            \"192.168.0.1:6005\",\n            \"192.168.0.1:6006\",\n            \"192.168.0.1:6008\",\n            \"192.168.0.1:6007\",\n        ]\n    )\n    assert test(k) == [\n        \"192.168.0.1:6002\",\n        \"192.168.0.1:6007\",\n        \"192.168.0.1:6004\",\n        \"192.168.0.1:6004\",\n    ]\n\n    k.remove_node(\"192.168.0.1:6007\")\n    assert test(k) == [\n        \"192.168.0.1:6002\",\n        \"192.168.0.1:6000\",\n        \"192.168.0.1:6004\",\n        \"192.168.0.1:6004\",\n    ]\n\n    k.add_node(\"192.168.0.1:6007\")\n    assert test(k) == [\n        \"192.168.0.1:6002\",\n        \"192.168.0.1:6007\",\n        \"192.168.0.1:6004\",\n        \"192.168.0.1:6004\",\n    ]\n"
  },
  {
    "path": "tests/test_poll.py",
    "content": "import pytest\n\nfrom rb import clients\nfrom rb.poll import available_pollers\nfrom rb.utils import text_type\n\n\n@pytest.mark.parametrize(\n    \"poll\", available_pollers, ids=[x.__name__ for x in available_pollers]\n)\ndef test_simple_api(cluster, poll, monkeypatch):\n    monkeypatch.setattr(clients, \"poll\", poll)\n\n    client = cluster.get_routing_client()\n    with client.map() as map_client:\n        for x in range(10):\n            map_client.set(\"key:%s\" % x, x)\n\n    for x in range(10):\n        assert client.get(\"key:%d\" % x) == text_type(x)\n"
  },
  {
    "path": "tests/test_promise.py",
    "content": "from rb.promise import Promise\n\n\ndef test_resolved_promise():\n    p = Promise.resolved(42)\n    assert p.is_resolved\n    assert not p.is_pending\n    assert not p.is_rejected\n    assert p.value == 42\n\n\ndef test_rejected_promise():\n    err = RuntimeError(\"So fail\")\n    p = Promise.rejected(err)\n    assert not p.is_resolved\n    assert not p.is_pending\n    assert p.is_rejected\n    assert p.reason == err\n\n\ndef test_success_callbacks():\n    results = []\n\n    p = Promise()\n    assert p.is_pending\n    p.done(results.append)\n\n    assert results == []\n    p.resolve(42)\n    assert results == [42]\n\n    p = Promise.resolved(23)\n    p.done(results.append)\n\n    assert results == [42, 23]\n\n\ndef test_failure_callbacks():\n    results = []\n\n    p = Promise()\n    assert p.is_pending\n    p.done(on_failure=results.append)\n\n    assert results == []\n    p.reject(42)\n    assert results == [42]\n\n    p = Promise.rejected(23)\n    p.done(on_failure=results.append)\n\n    assert results == [42, 23]\n\n\ndef test_promise_then():\n    p = Promise.resolved([1, 2, 3])\n\n    def on_success(value):\n        return value + [4]\n\n    p2 = p.then(success=on_success)\n    assert p2.value == [1, 2, 3, 4]\n\n\ndef test_promise_all():\n    p = Promise.all([])\n    assert p.is_resolved\n    assert p.value == []\n\n    p = Promise.all({})\n    assert p.is_resolved\n    assert p.value == {}\n\n    p = Promise.all([Promise.resolved(1), Promise.resolved(2), Promise.resolved(3),])\n\n    assert p.is_resolved\n    assert p.value == [1, 2, 3]\n\n    p = Promise.all(\n        {\n            \"key1\": Promise.resolved(1),\n            \"key2\": Promise.resolved(2),\n            \"key3\": Promise.resolved(3),\n        }\n    )\n\n    assert p.is_resolved\n    assert p.value == {\"key1\": 1, \"key2\": 2, \"key3\": 3}\n\n    p = Promise.all([Promise.resolved(1), Promise.rejected(2), Promise.resolved(3),])\n    assert p.is_rejected\n    assert p.reason 
== 2\n\n\ndef test_auto_coercion():\n    p = Promise.all([1, 2, Promise.resolved(3)])\n    assert p.is_resolved\n    assert p.value == [1, 2, 3]\n\n    p = Promise.all({1: 1, 2: 2, 3: Promise.resolved(3)})\n    assert p.is_resolved\n    assert p.value == {1: 1, 2: 2, 3: 3}\n"
  },
  {
    "path": "tests/test_router.py",
    "content": "import pytest\n\nfrom rb.cluster import Cluster\nfrom rb.router import UnroutableCommand, extract_keys, BadHostSetup\n\n\ndef test_router_key_routing():\n    cluster = Cluster({0: {\"db\": 0},})\n\n    router = cluster.get_router()\n    assert router.get_key(\"INCR\", [\"foo\"]) == \"foo\"\n    assert router.get_key(\"GET\", [\"bar\"]) == \"bar\"\n\n    with pytest.raises(UnroutableCommand):\n        router.get_key(\"MGET\", [\"foo\", \"bar\", \"baz\"])\n\n    with pytest.raises(UnroutableCommand):\n        router.get_key(\"UNKNOWN\", [])\n\n\ndef test_host_validation():\n    cluster = Cluster(hosts={1: {}})\n    try:\n        cluster.get_router()\n    except BadHostSetup as e:\n        assert 'Expected host with ID \"0\"' in str(e)\n    else:\n        raise Exception(\"Expected runtime error\")\n\n\ndef test_router_basics():\n    cluster = Cluster({0: {\"db\": 0}, 1: {\"db\": 1}, 2: {\"db\": 2},})\n\n    router = cluster.get_router()\n    assert router.get_host_for_command(\"INCR\", [\"foo\"]) == 1\n    assert router.get_host_for_command(\"INCR\", [\"bar\"]) == 2\n    assert router.get_host_for_command(\"INCR\", [\"baz\"]) == 0\n\n    assert router.get_host_for_key(\"foo\") == 1\n    assert router.get_host_for_key(\"bar\") == 2\n    assert router.get_host_for_key(\"baz\") == 0\n\n\ndef test_key_extraction():\n    assert extract_keys([\"foo\"], (1, 1, 1))\n    assert extract_keys([\"foo\", \"value\", \"foo2\", \"value2\"], (1, -1, 2)) == [\n        \"foo\",\n        \"foo2\",\n    ]\n    assert extract_keys([\"extra\", \"foo\", \"value\", \"foo2\", \"value2\"], (2, -1, 2)) == [\n        \"foo\",\n        \"foo2\",\n    ]\n    assert extract_keys([\"foo\", \"foo2\"], (1, -1, 1)) == [\"foo\", \"foo2\"]\n"
  },
  {
    "path": "tests/test_utils.py",
    "content": "import pytest\n\nfrom rb.utils import bytes_type, crc32\n\n\ndef test_crc32():\n    \"\"\"\n    Test that we get consistent values from python 2/3\n    \"\"\"\n    assert crc32(\"test\".encode(\"utf-8\")) == -662733300\n"
  }
]