Repository: getsentry/rb
Branch: master
Commit: 29826988d6de
Files: 38
Total size: 138.3 KB
Directory structure:
gitextract_cxrwzrwv/
├── .craft.yml
├── .github/
│ └── workflows/
│ ├── build.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .python-version
├── CHANGES
├── LICENSE
├── Makefile
├── README.md
├── docs/
│ ├── Makefile
│ ├── _themes/
│ │ └── rb_theme/
│ │ ├── layout.html
│ │ ├── static/
│ │ │ └── rb.css_t
│ │ └── theme.conf
│ ├── conf.py
│ ├── index.rst
│ └── make.bat
├── hooks/
│ └── pre-commit
├── rb/
│ ├── __init__.py
│ ├── _rediscommands.py
│ ├── clients.py
│ ├── cluster.py
│ ├── ketama.py
│ ├── poll.py
│ ├── promise.py
│ ├── router.py
│ ├── testing.py
│ └── utils.py
├── scripts/
│ └── bump-version.sh
├── setup.cfg
├── setup.py
└── tests/
├── conftest.py
├── test_cluster.py
├── test_ketama.py
├── test_poll.py
├── test_promise.py
├── test_router.py
└── test_utils.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .craft.yml
================================================
# Craft release configuration (https://github.com/getsentry/craft).
# Indentation restored: nested mapping/sequence structure is required
# for craft to parse this file at all.
minVersion: "0.18.0"
github:
  owner: getsentry
  repo: rb
changelog: CHANGES
changelogPolicy: auto
statusProvider:
  name: github
artifactProvider:
  name: github
targets:
  - name: pypi
  - name: github
  - name: sentry-pypi
    internalPypiRepo: getsentry/pypi
requireNames:
  # Universal py2/py3 wheel produced by build.yml.
  - /^rb-.+-py2.py3-none-any.whl$/
================================================
FILE: .github/workflows/build.yml
================================================
# Builds a wheel on pushes to master/release branches and uploads it as
# an artifact named after the commit SHA (consumed by the craft release flow).
name: build
on:
  push:
    branches:
      - master
      - release/**
jobs:
  dist:
    name: Wheels
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
      - run: |
          pip install wheel
          python setup.py bdist_wheel
      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
        with:
          name: ${{ github.sha }}
          path: dist/*
================================================
FILE: .github/workflows/release.yml
================================================
# Manually-triggered release workflow: mints a GitHub App token, checks out
# the full history (fetch-depth: 0 is required by action-prepare-release),
# and runs Sentry's release preparation action.
name: release
on:
  workflow_dispatch:
    inputs:
      version:
        description: Version to release
        required: true
      force:
        description: Force a release even when there are release-blockers (optional)
        required: false
jobs:
  release:
    runs-on: ubuntu-latest
    name: "Release a new version"
    steps:
      - name: Get auth token
        id: token
        uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
        with:
          app-id: ${{ vars.SENTRY_RELEASE_BOT_CLIENT_ID }}
          private-key: ${{ secrets.SENTRY_RELEASE_BOT_PRIVATE_KEY }}
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
        with:
          token: ${{ steps.token.outputs.token }}
          fetch-depth: 0
      - name: Prepare release
        uses: getsentry/action-prepare-release@c8e1c2009ab08259029170132c384f03c1064c0e # v1
        env:
          GITHUB_TOKEN: ${{ steps.token.outputs.token }}
        with:
          version: ${{ github.event.inputs.version }}
          force: ${{ github.event.inputs.force }}
================================================
FILE: .github/workflows/test.yml
================================================
# Test matrix across OS / Python / redis-py version constraints.
# REDIS_VERSION is exported as an env var for the test run
# (presumably consumed by setup/tooling outside this file — confirm).
name: test
on:
  push:
    branches:
      - master
      - release/**
  pull_request:
jobs:
  test:
    name: Run tests
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        python: ["3.8", "3.9", "3.10", "pypy-3.8"]
        REDIS_VERSION: ["<3", "<4", "<5"]
    env:
      REDIS_VERSION: ${{ matrix.REDIS_VERSION }}
    steps:
      # Pinned to the same v4 commit used by build.yml and release.yml
      # (was a v3 pin; aligned for consistency).
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
      - name: Setup Python
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
        with:
          python-version: ${{ matrix.python }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install --editable .
      - name: Install Redis
        run: |
          if [ "$RUNNER_OS" == "Linux" ]; then
            sudo apt update && sudo apt install redis-server --no-install-recommends -y
          elif [ "$RUNNER_OS" == "macOS" ]; then
            brew install --quiet redis
          else
            echo "$RUNNER_OS not supported"
            exit 1
          fi
      - name: Run tests
        run: |
          make test
  # Aggregator job so branch protection can require a single check.
  collector:
    needs: [test]
    if: always()
    runs-on: ubuntu-latest
    steps:
      - name: Check for failures
        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
        run: |
          echo "One of the dependent jobs have failed. You may need to re-run it." && exit 1
================================================
FILE: .gitignore
================================================
docs/_build
*.pyc
*.pyo
.DS_Store
.cache/
build
dist
*.egg-info
================================================
FILE: .python-version
================================================
3.8
================================================
FILE: CHANGES
================================================
Rb Changelog
============
1.10.0
------
### Various fixes & improvements
- add internal pypi deploy to `rb` (#54) by @asottile-sentry
- set fetch-depth: 0 for release (#53) by @asottile-sentry
- add compat for redis 5.x (#52) by @asottile-sentry
- fix CI (#51) by @asottile-sentry
1.9.0
-----
- Redis compatibility for 3.4.1
1.8
-----------
- Python 3.6 compatibility
- Redis compatibility for versions >=2.6,<3.4
1.7
-----------
(released Jun 23rd 2017)
- Ensure a connection is released to the pool after receiving a response, even
if the result is an error.
1.6
-----------
(released Nov 23rd 2016)
- Support `options` keyword arguments passed to `execute_command`.
1.5
-----------
(released Nov 23rd 2016)
- Detect dead connections on pool checkout.
1.4
-----------
(released on Feb 8th 2016)
- Fixed cluster for host defaults support.
- Changed poller to handle close explicitly. This should prevent
bad loops when the socket closes while writing.
- Added support for execute_commands.
1.3.1
-------------
(released on Oct 13th 2015)
- Fixed an illogical constructor for the local client.
- Fixed a problem with clearing out pending batches.
- Hosts are now validated to not have holes in the two shipped routers
which both depend on a gapless setup.
- Connection errors now try to print out the original IO error's infos.
1.3
-----------
(released on Oct 7th 2015)
- Quickly fixed `target_key`'s behavior to make sense so that the
result on the promise is the value instead of a dictionary of a
single host.
1.2
-----------
(released on Oct 7th 2015)
- Added `target_key` to the fanout client to simplify targeting of hosts.
1.1.2
-------------
(released on Sep 28th 2015)
- Fixed command buffers for disabled max concurrency.
- Fixed map manager timeouts.
1.1.1
-------------
(released on Sep 15th 2015)
- Made rb work with older versions of pyredis.
1.1
-----------
(released on Sep 9th 2015)
- Added internal support for async writes which improves performance
and parallelism with large command batches where the command is
larger than the kernel buffer size.
1.0
-----------
(released on Sep 4th 2015)
- Added support for automatic batching of GET and SET to MGET and MSET.
- Added emulated `mget` and `mset` commands to promise based clients.
- Fixed a bug with the HostInfo not comparing correctly.
- Added support for epoll as an alternative to poll.
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2011-2012 DISQUS
Copyright 2015 Functional Software Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: Makefile
================================================
# Neither target produces a file, so declare both phony; recipe lines
# restored with the hard tabs GNU make requires.
.PHONY: setup-git test

setup-git:
	@echo "--> Installing git hooks"
	@pip install flake8
	@cd .git/hooks && ln -sf ../../hooks/* ./

test:
	@py.test -vv --tb=short
================================================
FILE: README.md
================================================
# rb [](https://github.com/getsentry/rb/actions/workflows/test.yml)

rb - the redis blaster.
The fastest way to talk to many redis nodes. Can do routing as well as
blindly blasting commands to many nodes. How does it work?
For full documentation see [rb.rtfd.org](https://rb.rtfd.org/)
## Quickstart
Set up a cluster:
```python
from rb import Cluster
cluster = Cluster({
0: {'port': 6379},
1: {'port': 6380},
2: {'port': 6381},
3: {'port': 6382},
}, host_defaults={
'host': '127.0.0.1',
})
```
Automatic routing:
```python
results = []
with cluster.map() as client:
for key in range(100):
client.get(key).then(lambda x: results.append(int(x or 0)))
print('Sum: %s' % sum(results))
```
Fanout:
```python
with cluster.fanout(hosts=[0, 1, 2, 3]) as client:
infos = client.info()
```
Fanout to all:
```python
with cluster.fanout(hosts='all') as client:
client.flushdb()
```
================================================
FILE: docs/Makefile
================================================
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Classy.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Classy.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/Classy"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Classy"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
	      "run these through (pdf)latex."

latexpdf: latex
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	make -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
================================================
FILE: docs/_themes/rb_theme/layout.html
================================================
{% extends "basic/layout.html" %}
{# Wrap only the index page body in an extra div for the theme's landing styles. #}
{% block header %}
  {{ super() }}
  {% if pagename == 'index' %}
  <div class="indexwrapper">
  {% endif %}
{% endblock %}
{% block footer %}
  {% if pagename == 'index' %}
  </div>
  {% endif %}
{% endblock %}
{# do not display relbars #}
{% block relbar1 %}{% endblock %}
{% block relbar2 %}
  {# https (was http): plain-http links/images are blocked as mixed content
     when the docs are served over https. #}
  {% if theme_github_fork %}
    <a href="https://github.com/{{ theme_github_fork }}"><img style="position: fixed; top: 0; right: 0; border: 0;"
    src="https://s3.amazonaws.com/github/ribbons/forkme_right_darkblue_121621.png" alt="Fork me on GitHub" /></a>
  {% endif %}
{% endblock %}
{% block sidebar1 %}{% endblock %}
{% block sidebar2 %}{% endblock %}
================================================
FILE: docs/_themes/rb_theme/static/rb.css_t
================================================
@import url("basic.css");
/* https (was http): an http stylesheet import is blocked as mixed content
   when the docs are served over https. */
@import url(https://fonts.googleapis.com/css?family=Roboto+Mono:400,700italic,700,400italic);
/* -- page layout ----------------------------------------------------------- */
body {
font-family: 'Verdana', sans-serif;
font-weight: 300;
font-size: 17px;
color: #000;
background: white;
margin: 0;
padding: 0;
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 40px auto 0 auto;
max-width: 800px;
}
hr {
border: 1px solid #B1B4B6;
}
div.body {
background-color: #ffffff;
color: #3E4349;
padding: 0 30px 30px 30px;
}
img.floatingflask {
padding: 0 0 10px 10px;
float: right;
}
div.footer {
text-align: right;
color: #888;
padding: 10px;
font-size: 14px;
width: 650px;
margin: 0 auto 40px auto;
}
div.footer a {
color: #888;
text-decoration: underline;
}
div.related {
line-height: 32px;
color: #888;
}
div.related ul {
padding: 0 0 0 10px;
}
div.related a {
color: #444;
}
/* -- body styles ----------------------------------------------------------- */
a {
color: white;
background: black;
font-weight: bold;
text-decoration: none;
}
a:hover {
color: #888;
background: transparent;
text-decoration: underline;
}
div.body {
    padding-bottom: 40px; /* saved for footer; was garbled "padding-bocodeom" */
}
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
font-family: 'Verdana', sans-serif;
font-weight: bold;
margin: 30px 0px 10px 0px;
padding: 0;
color: black;
}
div.body h1:before {
content: "";
display: block;
background: url(rb.png) no-repeat center center;
background-size: 100%;
width: 256px;
height: 246px;
float: right;
margin: 0 0 25px 25px;
}
div.body h2 { font-size: 180%; }
div.body h3 { font-size: 150%; }
div.body h4 { font-size: 130%; }
div.body h5 { font-size: 100%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: white;
padding: 0 4px;
text-decoration: none;
}
a.headerlink:hover {
color: #444;
background: #eaeaea;
}
div.body p, div.body dd, div.body li {
line-height: 1.4em;
}
div.admonition {
    background: #fafafa;
    margin: 20px -30px;
    padding: 10px 30px;
    border-top: 1px solid #ccc;
    border-bottom: 1px solid #ccc; /* was garbled "border-bocodeom" */
}
div.admonition p.admonition-title {
font-family: 'Garamond', 'Georgia', serif;
font-weight: normal;
font-size: 24px;
margin: 0 0 10px 0;
padding: 0;
line-height: 1;
}
div.admonition p.last {
    margin-bottom: 0; /* was garbled "margin-bocodeom" */
}
div.highlight{
background-color: white;
}
dt:target, .highlight {
background: #FAF3E8;
}
div.note {
background-color: #eee;
border: 1px solid #ccc;
}
div.seealso {
background-color: #ffc;
border: 1px solid #ff6;
}
div.topic {
background-color: #eee;
}
div.warning {
background-color: #ffe4e4;
border: 1px solid #f66;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
pre, code {
font-family: 'Roboto Mono', monospace;
font-size: 1em;
}
img.screenshot {
}
code.descname, code.descclassname {
font-size: 0.95em;
}
code.descname {
padding-right: 0.08em;
}
img.screenshot {
-moz-box-shadow: 2px 2px 4px #eee;
-webkit-box-shadow: 2px 2px 4px #eee;
box-shadow: 2px 2px 4px #eee;
}
table.docutils {
border: 1px solid #888;
-moz-box-shadow: 2px 2px 4px #eee;
-webkit-box-shadow: 2px 2px 4px #eee;
box-shadow: 2px 2px 4px #eee;
}
table.docutils td, table.docutils th {
border: 1px solid #888;
padding: 0.25em 0.7em;
}
table.field-list, table.footnote {
border: none;
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
}
table.footnote {
margin: 15px 0;
width: 100%;
border: 1px solid #eee;
}
table.field-list th {
padding: 0 0.8em 0 0;
}
table.field-list td {
padding: 0;
}
table.footnote td {
padding: 0.5em;
}
dl {
margin: 0;
padding: 0;
}
dl dd {
margin-left: 30px;
}
pre {
margin: 15px 0;
line-height: 1.4em;
padding: 10px 20px;
background: #eee;
}
a.reference.internal {
background: transparent;
color: black;
}
code, a code, code.xref {
background-color: #eee;
color: #222;
/* padding: 1px 2px; */
}
a:hover code {
background: black;
color: white;
}
================================================
FILE: docs/_themes/rb_theme/theme.conf
================================================
[theme]
inherit = basic
stylesheet = rb.css
nosidebar = true
[options]
index_logo = ''
github_fork =
================================================
FILE: docs/conf.py
================================================
# -*- coding: utf-8 -*-
#
# rb documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 26 19:53:01 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rb'
# Company name matches the LICENSE file ("Functional Software Inc.");
# was misspelled "Function Software Inc.".
copyright = u'2015, Functional Software Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'rb_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'index_logo': 'rb.png',
'github_fork': 'getsentry/rb'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'rb'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'rbdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# Author corrected to "Functional Software Inc." to match the package
# copyright in rb/__init__.py.
latex_documents = [
    ('index', 'rb.tex', u'rb documentation',
     u'Functional Software Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
pygments_style = 'tango'
# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Author corrected to "Functional Software Inc." to match the package
# copyright in rb/__init__.py.
man_pages = [
    ('index', 'rb', u'rb documentation',
     [u'Functional Software Inc.'], 1)
]
intersphinx_mapping = {
}
================================================
FILE: docs/index.rst
================================================
rb: the redis blaster
=====================
.. module:: rb
Rb, the redis blaster, is a library that implements non-replicated
sharding for redis. It implements a custom routing system on top of
python redis that allows you to automatically target different servers
without having to manually route requests to the individual nodes.
It does not implement all functionality of redis and does not attempt to
do so. You can at any point get a client to a specific host, but for the
most part the assumption is that your operations are limited to basic
key/value operations that can be routed to different nodes automatically.
What you can do:
* automatically target hosts for single-key operations
* execute commands against all or a subset of nodes
* do all of that in parallel
Installation
------------
rb is available on PyPI and can be installed from there::
$ pip install rb
Configuration
-------------
Getting started with rb is super easy. If you have been using py-redis
before you will feel right at home. The main difference is that instead
of connecting to a single host, you configure a cluster to connect to
multiple::
from rb import Cluster
cluster = Cluster(hosts={
0: {'port': 6379},
1: {'port': 6380},
2: {'port': 6381},
3: {'port': 6382},
4: {'port': 6379},
5: {'port': 6380},
6: {'port': 6381},
7: {'port': 6382},
}, host_defaults={
'host': '127.0.0.1',
})
In this case we set up 8 nodes on four different server processes on the
same host. The `hosts` parameter is a mapping of hosts to connect to.
The key of the dictionary is the host ID (an integer) and the value is
a dictionary of parameters. The `host_defaults` is a dictionary of
optional defaults that is filled in for all hosts. This is useful if you
want to share some common defaults that repeat (in this case all hosts
connect to localhost).
In the default configuration the :class:`PartitionRouter` is used for
routing.
Routing
-------
Now that the cluster is constructed we can use
:meth:`Cluster.get_routing_client` to get a redis client that
automatically routes to the right redis nodes for each command::
client = cluster.get_routing_client()
results = {}
for key in keys_to_look_up:
results[key] = client.get(key)
The client works pretty much exactly like a standard pyredis
`StrictRedis` client with the main difference that it can only execute
commands that involve exactly one key.
This basic operation however runs in series. What makes rb useful is that
it can automatically build redis pipelines and send out queries to many
hosts in parallel. This however changes the usage slightly as now the
value is not immediately available::
results = {}
with cluster.map() as client:
for key in keys_to_look_up:
results[key] = client.get(key)
While it looks similar so far, instead of storing the actual values in the
result dictionary, :class:`Promise` objects are stored instead. When the
map context manager ends they are guaranteed however to have been executed
and you can access the :attr:`Promise.value` attribute to get the value::
    for key, promise in results.items():
        print('%s: %s' % (key, promise.value))
If you want to send a command to all participating hosts (for instance to
delete the database) you can use the :meth:`Cluster.all` method::
with cluster.all() as client:
client.flushdb()
If you do that, the promise value is a dictionary with the host IDs as
keys and the results as value. As an example::
with cluster.all() as client:
results = client.info()
    for host_id, info in results.items():
        print('host %s is running %s' % (host_id, info['os']))
To explicitly target some hosts you can use :meth:`Cluster.fanout` which
accepts a list of host IDs to send the command to.
API
---
This is the entire reference of the public API. Note that this library
extends the Python redis library so some of these classes have more
functionality for which you will need to consult the py-redis library.
Cluster
```````
.. autoclass:: Cluster
:members:
Clients
```````
.. autoclass:: RoutingClient
:members:
.. autoclass:: MappingClient
:members:
.. autoclass:: FanoutClient
:members:
Promise
```````
.. autoclass:: Promise
:members:
Routers
```````
.. autoclass:: BaseRouter
:members:
.. autoclass:: ConsistentHashingRouter
:members:
.. autoclass:: PartitionRouter
:members:
.. autoexception:: UnroutableCommand
Testing
```````
.. autoclass:: rb.testing.TestSetup
.. autofunction:: rb.testing.make_test_cluster
================================================
FILE: docs/make.bat
================================================
@ECHO OFF

REM Command file for Sphinx documentation for the rb project.
REM Fix: the qthelp target still referenced "Classy.qhcp"/"Classy.ghc"
REM from the Sphinx quickstart template; it now uses this project's
REM htmlhelp basename ("rb") and the correct .qhc extension.

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
if NOT "%PAPER%" == "" (
	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
)

if "%1" == "" goto help

if "%1" == "help" (
	:help
	echo.Please use `make ^<target^>` where ^<target^> is one of
	echo.  html       to make standalone HTML files
	echo.  dirhtml    to make HTML files named index.html in directories
	echo.  singlehtml to make a single large HTML file
	echo.  pickle     to make pickle files
	echo.  json       to make JSON files
	echo.  htmlhelp   to make HTML files and a HTML help project
	echo.  qthelp     to make HTML files and a qthelp project
	echo.  devhelp    to make HTML files and a Devhelp project
	echo.  epub       to make an epub
	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
	echo.  text       to make text files
	echo.  man        to make manual pages
	echo.  changes    to make an overview over all changed/added/deprecated items
	echo.  linkcheck  to check all external links for integrity
	echo.  doctest    to run all doctests embedded in the documentation if enabled
	goto end
)

if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)

if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "singlehtml" (
	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\rb.qhcp
	echo.To view the help file:
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\rb.qhc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

:end
================================================
FILE: hooks/pre-commit
================================================
#!/usr/bin/env python
"""Git pre-commit hook: run flake8 over the staged Python files and
block the commit if any lint errors are found.
"""
import glob
import os
import sys

os.environ['PYFLAKES_NODOCTEST'] = '1'

# pep8.py uses sys.argv to find setup.cfg
sys.argv = [os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)]

# git usurps your bin path for hooks and will always run system python
if 'VIRTUAL_ENV' in os.environ:
    site_packages = glob.glob(
        '%s/lib/*/site-packages' % os.environ['VIRTUAL_ENV'])[0]
    sys.path.insert(0, site_packages)


def py_lint(files_modified):
    """Return True if flake8 reports any error in the given files."""
    from flake8.main import DEFAULT_CONFIG
    from flake8.engine import get_style_guide

    # Remove non-py files.  Use a list (not filter()): under Python 3
    # filter() returns a lazy iterator which is always truthy, so the
    # empty-input check below would never fire and the iterator could
    # only be consumed once.
    files_modified = [f for f in files_modified if f.endswith('.py')]
    if not files_modified:
        return False

    flake8_style = get_style_guide(config_file=DEFAULT_CONFIG)
    report = flake8_style.check_files(files_modified)
    return report.total_errors != 0


def main():
    """Lint the staged files; return 1 (block commit) on lint errors."""
    from flake8.hooks import run
    gitcmd = "git diff-index --cached --name-only HEAD"
    _, files_modified, _ = run(gitcmd)
    # Drop entries that no longer exist on disk (deleted/renamed files).
    files_modified = [f for f in files_modified if os.path.exists(f)]
    if py_lint(files_modified):
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
================================================
FILE: rb/__init__.py
================================================
"""
rb
~~
The redis blaster.
:copyright: (c) 2015 Functional Software Inc.
:license: Apache License 2.0, see LICENSE for more details.
"""
from rb.cluster import Cluster
from rb.clients import RoutingClient, MappingClient, FanoutClient
from rb.router import (
BaseRouter,
ConsistentHashingRouter,
PartitionRouter,
UnroutableCommand,
)
from rb.promise import Promise
__version__ = "1.10.0"
__all__ = [
# cluster
"Cluster",
# client
"RoutingClient",
"MappingClient",
"FanoutClient",
# router
"BaseRouter",
"ConsistentHashingRouter",
"PartitionRouter",
"UnroutableCommand",
# promise
"Promise",
]
================================================
FILE: rb/_rediscommands.py
================================================
# flake8: noqa
COMMANDS = {
"APPEND": {"arity": 3, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"AUTH": {
"arity": 2,
"flags": ["readonly", "noscript", "loading", "stale", "fast"],
"key_spec": (0, 0, 0),
},
"BGREWRITEAOF": {"arity": 1, "flags": ["readonly", "admin"], "key_spec": (0, 0, 0)},
"BGSAVE": {"arity": 1, "flags": ["readonly", "admin"], "key_spec": (0, 0, 0)},
"BITCOUNT": {"arity": -2, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"BITOP": {"arity": -4, "flags": ["write", "denyoom"], "key_spec": (2, -1, 1)},
"BITPOS": {"arity": -3, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"BLPOP": {"arity": -3, "flags": ["write", "noscript"], "key_spec": (1, -2, 1)},
"BRPOP": {"arity": -3, "flags": ["write", "noscript"], "key_spec": (1, 1, 1)},
"BRPOPLPUSH": {
"arity": 4,
"flags": ["write", "denyoom", "noscript"],
"key_spec": (1, 2, 1),
},
"CLIENT": {"arity": -2, "flags": ["readonly", "admin"], "key_spec": (0, 0, 0)},
"COMMAND": {
"arity": 0,
"flags": ["readonly", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"CONFIG": {
"arity": -2,
"flags": ["readonly", "admin", "stale"],
"key_spec": (0, 0, 0),
},
"DBSIZE": {"arity": 1, "flags": ["readonly", "fast"], "key_spec": (0, 0, 0)},
"DEBUG": {"arity": -2, "flags": ["admin", "noscript"], "key_spec": (0, 0, 0)},
"DECR": {"arity": 2, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)},
"DECRBY": {
"arity": 3,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"DEL": {"arity": -2, "flags": ["write"], "key_spec": (1, -1, 1)},
"DISCARD": {
"arity": 1,
"flags": ["readonly", "noscript", "fast"],
"key_spec": (0, 0, 0),
},
"DUMP": {"arity": 2, "flags": ["readonly", "admin"], "key_spec": (1, 1, 1)},
"ECHO": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (0, 0, 0)},
"EVAL": {"arity": -3, "flags": ["noscript", "movablekeys"], "key_spec": (0, 0, 0)},
"EVALSHA": {
"arity": -3,
"flags": ["noscript", "movablekeys"],
"key_spec": (0, 0, 0),
},
"EXEC": {"arity": 1, "flags": ["noscript", "skip_monitor"], "key_spec": (0, 0, 0)},
"EXISTS": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"EXPIRE": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"EXPIREAT": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"FLUSHALL": {"arity": 1, "flags": ["write"], "key_spec": (0, 0, 0)},
"FLUSHDB": {"arity": 1, "flags": ["write"], "key_spec": (0, 0, 0)},
"GET": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"GETBIT": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"GETRANGE": {"arity": 4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"GETSET": {"arity": 3, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"HDEL": {"arity": -3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"HEXISTS": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"HGET": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"HGETALL": {"arity": 2, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"HINCRBY": {
"arity": 4,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"HINCRBYFLOAT": {
"arity": 4,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"HKEYS": {
"arity": 2,
"flags": ["readonly", "sort_for_script"],
"key_spec": (1, 1, 1),
},
"HLEN": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"HMGET": {"arity": -3, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"HMSET": {"arity": -4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"HSCAN": {"arity": -3, "flags": ["readonly", "random"], "key_spec": (1, 1, 1)},
"HSET": {"arity": 4, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)},
"HSETNX": {
"arity": 4,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"HVALS": {
"arity": 2,
"flags": ["readonly", "sort_for_script"],
"key_spec": (1, 1, 1),
},
"INCR": {"arity": 2, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)},
"INCRBY": {
"arity": 3,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"INCRBYFLOAT": {
"arity": 3,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"INFO": {
"arity": -1,
"flags": ["readonly", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"KEYS": {
"arity": 2,
"flags": ["readonly", "sort_for_script"],
"key_spec": (0, 0, 0),
},
"LASTSAVE": {
"arity": 1,
"flags": ["readonly", "random", "fast"],
"key_spec": (0, 0, 0),
},
"LATENCY": {
"arity": -2,
"flags": ["readonly", "admin", "noscript", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"LINDEX": {"arity": 3, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"LINSERT": {"arity": 5, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"LLEN": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"LPOP": {"arity": 2, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"LPUSH": {
"arity": -3,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"LPUSHX": {
"arity": 3,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"LRANGE": {"arity": 4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"LREM": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)},
"LSET": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"LTRIM": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)},
"MGET": {"arity": -2, "flags": ["readonly"], "key_spec": (1, -1, 1)},
"MIGRATE": {"arity": 6, "flags": ["write", "admin"], "key_spec": (0, 0, 0)},
"MONITOR": {
"arity": 1,
"flags": ["readonly", "admin", "noscript"],
"key_spec": (0, 0, 0),
},
"MOVE": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"MSET": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 2)},
"MSETNX": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 2)},
"MULTI": {
"arity": 1,
"flags": ["readonly", "noscript", "fast"],
"key_spec": (0, 0, 0),
},
"OBJECT": {"arity": 3, "flags": ["readonly"], "key_spec": (2, 2, 2)},
"PERSIST": {"arity": 2, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"PEXPIRE": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"PEXPIREAT": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"PFADD": {
"arity": -2,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"PFCOUNT": {"arity": -2, "flags": ["write"], "key_spec": (1, 1, 1)},
"PFDEBUG": {"arity": -3, "flags": ["write"], "key_spec": (0, 0, 0)},
"PFMERGE": {"arity": -2, "flags": ["write", "denyoom"], "key_spec": (1, -1, 1)},
"PFSELFTEST": {"arity": 1, "flags": ["readonly"], "key_spec": (0, 0, 0)},
"PING": {"arity": 1, "flags": ["readonly", "stale", "fast"], "key_spec": (0, 0, 0)},
"PSETEX": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"PSUBSCRIBE": {
"arity": -2,
"flags": ["readonly", "pubsub", "noscript", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"PSYNC": {
"arity": 3,
"flags": ["readonly", "admin", "noscript"],
"key_spec": (0, 0, 0),
},
"PTTL": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"PUBLISH": {
"arity": 3,
"flags": ["readonly", "pubsub", "loading", "stale", "fast"],
"key_spec": (0, 0, 0),
},
"PUBSUB": {
"arity": -2,
"flags": ["readonly", "pubsub", "random", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"PUNSUBSCRIBE": {
"arity": -1,
"flags": ["readonly", "pubsub", "noscript", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"RANDOMKEY": {"arity": 1, "flags": ["readonly", "random"], "key_spec": (0, 0, 0)},
"RENAME": {"arity": 3, "flags": ["write"], "key_spec": (1, 2, 1)},
"RENAMENX": {"arity": 3, "flags": ["write", "fast"], "key_spec": (1, 2, 1)},
"REPLCONF": {
"arity": -1,
"flags": ["readonly", "admin", "noscript", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"RESTORE": {
"arity": 4,
"flags": ["write", "denyoom", "admin"],
"key_spec": (1, 1, 1),
},
"ROLE": {
"arity": 1,
"flags": ["admin", "noscript", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"RPOP": {"arity": 2, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"RPOPLPUSH": {"arity": 3, "flags": ["write", "denyoom"], "key_spec": (1, 2, 1)},
"RPUSH": {
"arity": -3,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"RPUSHX": {
"arity": 3,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"SADD": {"arity": -3, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)},
"SAVE": {
"arity": 1,
"flags": ["readonly", "admin", "noscript"],
"key_spec": (0, 0, 0),
},
"SCAN": {"arity": -2, "flags": ["readonly", "random"], "key_spec": (0, 0, 0)},
"SCARD": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"SCRIPT": {
"arity": -2,
"flags": ["readonly", "admin", "noscript"],
"key_spec": (0, 0, 0),
},
"SDIFF": {
"arity": -2,
"flags": ["readonly", "sort_for_script"],
"key_spec": (1, -1, 1),
},
"SDIFFSTORE": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 1)},
"SELECT": {
"arity": 2,
"flags": ["readonly", "loading", "fast"],
"key_spec": (0, 0, 0),
},
"SET": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"SETBIT": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"SETEX": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"SETNX": {"arity": 3, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)},
"SETRANGE": {"arity": 4, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"SHUTDOWN": {
"arity": -1,
"flags": ["readonly", "admin", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"SINTER": {
"arity": -2,
"flags": ["readonly", "sort_for_script"],
"key_spec": (1, -1, 1),
},
"SINTERSTORE": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 1)},
"SISMEMBER": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"SLAVEOF": {
"arity": 3,
"flags": ["admin", "noscript", "stale"],
"key_spec": (0, 0, 0),
},
"SLOWLOG": {"arity": -2, "flags": ["readonly"], "key_spec": (0, 0, 0)},
"SMEMBERS": {
"arity": 2,
"flags": ["readonly", "sort_for_script"],
"key_spec": (1, 1, 1),
},
"SMOVE": {"arity": 4, "flags": ["write", "fast"], "key_spec": (1, 2, 1)},
"SORT": {"arity": -2, "flags": ["write", "denyoom"], "key_spec": (1, 1, 1)},
"SPOP": {
"arity": 2,
"flags": ["write", "noscript", "random", "fast"],
"key_spec": (1, 1, 1),
},
"SRANDMEMBER": {
"arity": -2,
"flags": ["readonly", "random"],
"key_spec": (1, 1, 1),
},
"SREM": {"arity": -3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"SSCAN": {"arity": -3, "flags": ["readonly", "random"], "key_spec": (1, 1, 1)},
"STRLEN": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"SUBSCRIBE": {
"arity": -2,
"flags": ["readonly", "pubsub", "noscript", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"SUBSTR": {"arity": 4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"SUNION": {
"arity": -2,
"flags": ["readonly", "sort_for_script"],
"key_spec": (1, -1, 1),
},
"SUNIONSTORE": {"arity": -3, "flags": ["write", "denyoom"], "key_spec": (1, -1, 1)},
"SYNC": {
"arity": 1,
"flags": ["readonly", "admin", "noscript"],
"key_spec": (0, 0, 0),
},
"TIME": {
"arity": 1,
"flags": ["readonly", "random", "fast"],
"key_spec": (0, 0, 0),
},
"TTL": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"TYPE": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"UNSUBSCRIBE": {
"arity": -1,
"flags": ["readonly", "pubsub", "noscript", "loading", "stale"],
"key_spec": (0, 0, 0),
},
"UNWATCH": {
"arity": 1,
"flags": ["readonly", "noscript", "fast"],
"key_spec": (0, 0, 0),
},
"WATCH": {
"arity": -2,
"flags": ["readonly", "noscript", "fast"],
"key_spec": (1, -1, 1),
},
"ZADD": {"arity": -4, "flags": ["write", "denyoom", "fast"], "key_spec": (1, 1, 1)},
"ZCARD": {"arity": 2, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"ZCOUNT": {"arity": 4, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"ZINCRBY": {
"arity": 4,
"flags": ["write", "denyoom", "fast"],
"key_spec": (1, 1, 1),
},
"ZINTERSTORE": {
"arity": -4,
"flags": ["write", "denyoom", "movablekeys"],
"key_spec": (0, 0, 0),
},
"ZLEXCOUNT": {"arity": 4, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"ZRANGE": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"ZRANGEBYLEX": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"ZRANGEBYSCORE": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"ZRANK": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"ZREM": {"arity": -3, "flags": ["write", "fast"], "key_spec": (1, 1, 1)},
"ZREMRANGEBYLEX": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)},
"ZREMRANGEBYRANK": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)},
"ZREMRANGEBYSCORE": {"arity": 4, "flags": ["write"], "key_spec": (1, 1, 1)},
"ZREVRANGE": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"ZREVRANGEBYLEX": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"ZREVRANGEBYSCORE": {"arity": -4, "flags": ["readonly"], "key_spec": (1, 1, 1)},
"ZREVRANK": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"ZSCAN": {"arity": -3, "flags": ["readonly", "random"], "key_spec": (1, 1, 1)},
"ZSCORE": {"arity": 3, "flags": ["readonly", "fast"], "key_spec": (1, 1, 1)},
"ZUNIONSTORE": {
"arity": -4,
"flags": ["write", "denyoom", "movablekeys"],
"key_spec": (0, 0, 0),
},
}
if __name__ == "__main__":
    # Regenerate the COMMANDS table above from a live redis server's
    # COMMAND output, rewriting this file in place while preserving
    # this regeneration block.
    import pprint

    import redis

    rv = {}
    for row in redis.Redis().execute_command("COMMAND"):
        cmd, arity, flags, first_key, last_key, step_count = row
        rv[cmd.upper()] = {
            "arity": arity,
            "flags": flags,
            "key_spec": (int(first_key), int(last_key), int(step_count)),
        }

    tail = []
    # Accept both quote styles for the __main__ guard.  The old code
    # matched only the single-quoted form, but this file now uses
    # double quotes, so regeneration would silently drop everything
    # from the guard onwards.
    guards = ("if __name__ == '__main__':", 'if __name__ == "__main__":')
    # __file__.rstrip("co") maps a .pyc/.pyo path back to the .py source.
    with open(__file__.rstrip("co"), "r+") as f:
        for line in f:
            if line.strip() in guards:
                tail.append(line)
                tail.extend(f)
                break
        f.seek(0)
        f.truncate(0)
        f.write(
            "# flake8: noqa\n\nCOMMANDS = %s\n\n\n%s"
            % (pprint.pformat(rv, width=74), "".join(tail))
        )
================================================
FILE: rb/clients.py
================================================
import time
import errno
import socket
from weakref import ref as weakref
from redis import StrictRedis
from redis.client import list_or_args
from redis.exceptions import ConnectionError
try:
from redis.exceptions import TimeoutError
except ImportError:
TimeoutError = ConnectionError
from rb.promise import Promise
from rb.poll import poll, is_closed
from rb.utils import izip, iteritems
# Commands that may be merged when they appear consecutively in a
# pipeline.  Maps the single-key command to a tuple of
# (batch_command, list_response):
#   list_response=True  -> the batch reply is a list with one item per
#                          merged command (MGET),
#   list_response=False -> the batch reply is a single value shared by
#                          all merged commands (MSET).
AUTO_BATCH_COMMANDS = {
    "GET": ("MGET", True),
    "SET": ("MSET", False),
}
def assert_open(client):
    """Guard helper: raise ``ValueError`` if the given client or command
    buffer reports itself as closed.
    """
    if not client.closed:
        return
    raise ValueError("I/O operation on closed file")
def merge_batch(command_name, arg_promise_tuples):
    """Collapse several auto-batchable commands of the same name into one.

    Takes the command name and a list of ``(args, promise)`` pairs and
    returns a single ``(command_name, args, options, promise)`` tuple.
    A single pair is passed through unchanged; multiple pairs are merged
    into the batch variant (e.g. GET -> MGET) and the combined response
    is fanned back out to the individual promises on resolution.
    """
    batch_command, list_response = AUTO_BATCH_COMMANDS[command_name]

    # Only one command queued -- no merging necessary.
    if len(arg_promise_tuples) == 1:
        single_args, single_promise = arg_promise_tuples[0]
        return command_name, single_args, {}, single_promise

    merged_promise = Promise()

    @merged_promise.done
    def on_success(value):
        if list_response:
            # One response item per merged command, in order.
            for item, (_, child_promise) in izip(value, arg_promise_tuples):
                child_promise.resolve(item)
        else:
            # A single shared response (e.g. MSET's status reply).
            for _, child_promise in arg_promise_tuples:
                child_promise.resolve(value)

    combined_args = []
    for individual_args, _ in arg_promise_tuples:
        combined_args.extend(individual_args)

    return batch_command, combined_args, {}, merged_promise
def auto_batch_commands(commands):
    """Given a pipeline of commands this attempts to merge the commands
    into more efficient ones if that is possible.

    Yields ``(command_name, args, options, promise)`` tuples; runs of
    consecutive identical batchable commands are collapsed via
    :func:`merge_batch`.
    """
    pending_batch = None

    for command_name, args, options, promise in commands:
        if command_name in AUTO_BATCH_COMMANDS:
            # Batchable commands never carry extra options.
            assert not options, "batch commands cannot merge options"
            if pending_batch is not None and pending_batch[0] == command_name:
                # Same command as the open batch -- extend it.
                pending_batch[1].append((args, promise))
            else:
                # A different batchable command starts a new batch.
                if pending_batch:
                    yield merge_batch(*pending_batch)
                pending_batch = (command_name, [(args, promise)])
        else:
            # Non-batchable command: flush any open batch, then pass
            # the command through untouched.
            if pending_batch:
                yield merge_batch(*pending_batch)
                pending_batch = None
            yield command_name, args, options, promise

    if pending_batch:
        yield merge_batch(*pending_batch)
class CommandBuffer(object):
"""The command buffer is an internal construct """
def __init__(self, host_id, connect, auto_batch=True):
self.host_id = host_id
self.connection = None
self._connect_func = connect
self.connect()
self.commands = []
self.pending_responses = []
self.auto_batch = auto_batch
self.sent_something = False
self.reconnects = 0
self._send_buf = []
@property
def closed(self):
"""Indicates if the command buffer is closed."""
return self.connection is None or self.connection._sock is None
def connect(self):
if self.connection is not None:
return
self.connection = self._connect_func()
# Ensure we're connected. Without this, we won't have a socket
# we can select over.
self.connection.connect()
def reconnect(self):
if self.sent_something:
raise RuntimeError(
"Cannot reset command buffer that already " "sent out data."
)
if self.reconnects > 5:
return False
self.reconnects += 1
self.connection = None
self.connect()
return True
def fileno(self):
"""Returns the file number of the underlying connection's socket
to be able to select over it.
"""
assert_open(self)
return self.connection._sock.fileno()
def enqueue_command(self, command_name, args, options):
"""Enqueue a new command into this pipeline."""
assert_open(self)
promise = Promise()
self.commands.append((command_name, args, options, promise))
return promise
@property
def has_pending_requests(self):
"""Indicates if there are outstanding pending requests on this
buffer.
"""
return bool(self._send_buf or self.commands)
def send_buffer(self):
"""Utility function that sends the buffer into the provided socket.
The buffer itself will slowly clear out and is modified in place.
"""
buf = self._send_buf
sock = self.connection._sock
try:
timeout = sock.gettimeout()
sock.setblocking(False)
try:
for idx, item in enumerate(buf):
sent = 0
while 1:
try:
sent = sock.send(item)
except IOError as e:
if e.errno == errno.EAGAIN:
continue
elif e.errno == errno.EWOULDBLOCK:
break
raise
self.sent_something = True
break
if sent < len(item):
buf[: idx + 1] = [item[sent:]]
break
else:
del buf[:]
finally:
sock.settimeout(timeout)
except IOError as e:
self.connection.disconnect()
if isinstance(e, socket.timeout):
raise TimeoutError("Timeout writing to socket (host %s)" % self.host_id)
raise ConnectionError(
"Error while writing to socket (host %s): %s" % (self.host_id, e)
)
def send_pending_requests(self):
    """Sends all pending requests into the connection.  The default is
    to only send pending data that fits into the socket without blocking.
    This returns `True` if all data was sent or `False` if pending data
    is left over.
    """
    assert_open(self)

    unsent_commands = self.commands
    if unsent_commands:
        self.commands = []

        # Merge eligible commands (e.g. several GETs into one MGET).
        if self.auto_batch:
            unsent_commands = auto_batch_commands(unsent_commands)

        buf = []
        for command_name, args, options, promise in unsent_commands:
            buf.append((command_name,) + tuple(args))
            # Responses are matched up by order later on.
            self.pending_responses.append((command_name, options, promise))

        cmds = self.connection.pack_commands(buf)
        self._send_buf.extend(cmds)

    if not self._send_buf:
        return True

    self.send_buffer()
    return not self._send_buf
def wait_for_responses(self, client):
    """Waits for all responses to come back and resolves the
    eventual results.

    Responses are read in the order the commands were enqueued, which
    is how redis pipelining guarantees matching.
    """
    assert_open(self)

    if self.has_pending_requests:
        raise RuntimeError(
            "Cannot wait for responses if there are "
            "pending requests outstanding. You need "
            "to wait for pending requests to be sent "
            "first."
        )

    pending = self.pending_responses
    self.pending_responses = []
    for command_name, options, promise in pending:
        value = client.parse_response(self.connection, command_name, **options)
        promise.resolve(value)
class RoutingPool(object):
    """The routing pool works together with the routing client to
    internally dispatch through the cluster's router to the correct
    internal connection pool.
    """

    def __init__(self, cluster):
        self.cluster = cluster

    def get_connection(self, command_name, shard_hint=None):
        """Checks out a verified-alive connection for the host given as
        ``shard_hint`` (required; it carries the host id).
        """
        host_id = shard_hint
        if host_id is None:
            raise RuntimeError("The routing pool requires the host id " "as shard hint")

        real_pool = self.cluster.get_pool_for_host(host_id)

        # When we check something out from the real underlying pool it's
        # very much possible that the connection is stale.  This is why we
        # check out up to 10 connections which are either not connected
        # yet or verified alive.
        for _ in range(10):
            con = real_pool.get_connection(command_name)
            if con._sock is None or not is_closed(con._sock):
                # Name-mangled to _RoutingPool__creating_pool; ``release``
                # below reads it back through the same mangling.
                con.__creating_pool = weakref(real_pool)
                return con

        raise ConnectionError(
            "Failed to check out a valid connection " "(host %s)" % host_id
        )

    def release(self, connection):
        # The real pool is referenced by the connection through an
        # internal weakref.  If the weakref is broken it means the
        # pool is already gone and we do not need to release the
        # connection.
        try:
            real_pool = connection.__creating_pool()
        except (AttributeError, TypeError):
            real_pool = None

        if real_pool is not None:
            real_pool.release(connection)

    def disconnect(self):
        self.cluster.disconnect_pools()

    def reset(self):
        # Part of the redis-py pool interface; nothing to reset here.
        pass
class BaseClient(StrictRedis):
    """Shared base class for all rb clients; currently identical to
    :class:`redis.StrictRedis` but kept as a common extension point.
    """

    pass
class RoutingBaseClient(BaseClient):
    """Common base for clients that dispatch through a cluster router.

    Pub/sub, manual pipelines and locks cannot be routed, so those
    APIs raise ``NotImplementedError`` here.
    """

    def __init__(self, connection_pool, auto_batch=True):
        BaseClient.__init__(self, connection_pool=connection_pool)
        # Whether eligible commands get merged into batch equivalents.
        self.auto_batch = auto_batch

    def pubsub(self, **kwargs):
        raise NotImplementedError("Pubsub is unsupported.")

    def pipeline(self, transaction=True, shard_hint=None):
        raise NotImplementedError(
            "Manual pipelines are unsupported. rb automatically pipelines commands."
        )

    def lock(self, *args, **kwargs):
        raise NotImplementedError("Locking is not supported.")
class MappingClient(RoutingBaseClient):
    """The routing client uses the cluster's router to target an individual
    node automatically based on the key of the redis command executed.

    For the parameters see :meth:`Cluster.map`.
    """

    def __init__(self, connection_pool, max_concurrency=None, auto_batch=True):
        RoutingBaseClient.__init__(
            self, connection_pool=connection_pool, auto_batch=auto_batch
        )
        # careful.  If you introduce any other variables here, then make
        # sure that FanoutClient.target still works correctly!
        self._max_concurrency = max_concurrency
        # Poller over one CommandBuffer per targeted host.
        self._cb_poll = poll()

    # For the mapping client we can fix up some redis standard commands
    # as we are promise based and have some flexibility here.

    def mget(self, keys, *args):
        # Emulated client-side: one GET promise per key, joined together.
        args = list_or_args(keys, args)
        return Promise.all([self.get(arg) for arg in args])

    def mset(self, *args, **kwargs):
        # Emulated client-side: one SET per pair; resolves to None.
        return Promise.all(
            [self.set(k, v) for k, v in iteritems(dict(*args, **kwargs))]
        ).then(lambda x: None)

    # Standard redis methods

    def execute_command(self, *args, **options):
        """Routes the command to its host's buffer and returns a promise
        instead of an immediate result.
        """
        router = self.connection_pool.cluster.get_router()
        host_id = router.get_host_for_command(args[0], args[1:])
        buf = self._get_command_buffer(host_id, args[0])
        return buf.enqueue_command(args[0], args[1:], options)

    # Custom Internal API

    def _get_command_buffer(self, host_id, command_name):
        """Returns the command buffer for the given command and arguments."""
        buf = self._cb_poll.get(host_id)
        if buf is not None:
            return buf

        # Enforce the concurrency cap by draining responses before
        # opening yet another buffer.
        if self._max_concurrency is not None:
            while len(self._cb_poll) >= self._max_concurrency:
                self.join(timeout=1.0)

        def connect():
            return self.connection_pool.get_connection(command_name, shard_hint=host_id)

        buf = CommandBuffer(host_id, connect, self.auto_batch)
        self._cb_poll.register(host_id, buf)
        return buf

    def _release_command_buffer(self, command_buffer):
        """This is called by the command buffer when it closes."""
        if command_buffer.closed:
            return

        self._cb_poll.unregister(command_buffer.host_id)
        self.connection_pool.release(command_buffer.connection)
        command_buffer.connection = None

    def _send_or_reconnect(self, command_buffer):
        # Try to flush; on connection trouble attempt a reconnect.
        try:
            command_buffer.send_pending_requests()
        except ConnectionError as e:
            self._try_reconnect(command_buffer, e)

    def _try_reconnect(self, command_buffer, err=None):
        # If something was sent before, we can't do anything at which
        # point we just reraise the underlying error.
        if command_buffer.sent_something:
            raise err or ConnectionError(
                "Cannot reconnect when data was " "already sent."
            )
        self._release_command_buffer(command_buffer)
        # If we cannot reconnect, reraise the error.
        if not command_buffer.reconnect():
            raise err or ConnectionError("Too many attempts to reconnect.")
        self._cb_poll.register(command_buffer.host_id, command_buffer)

    # Custom Public API

    def join(self, timeout=None):
        """Waits for all outstanding responses to come back or the timeout
        to be hit.

        Raises ``TimeoutError`` when a timeout was given and buffers
        are still outstanding once it elapses.
        """
        remaining = timeout

        while self._cb_poll and (remaining is None or remaining > 0):
            # Track elapsed wall time so the budget shrinks each round.
            now = time.time()
            rv = self._cb_poll.poll(remaining)
            if remaining is not None:
                remaining -= time.time() - now

            for command_buffer, event in rv:
                # This command buffer still has pending requests which
                # means we have to send them out first before we can read
                # all the data from it.
                if command_buffer.has_pending_requests:
                    if event == "close":
                        self._try_reconnect(command_buffer)
                    elif event == "write":
                        self._send_or_reconnect(command_buffer)

                # The general assumption is that all response is available
                # or this might block.  On reading we do not use async
                # receiving.  This generally works because latency in the
                # network is low and redis is super quick in sending.  It
                # does not make a lot of sense to complicate things here.
                elif event in ("read", "close"):
                    try:
                        command_buffer.wait_for_responses(self)
                    finally:
                        self._release_command_buffer(command_buffer)

        if self._cb_poll and timeout is not None:
            raise TimeoutError("Did not receive all data in time.")

    def cancel(self):
        """Cancels all outstanding requests."""
        for command_buffer in self._cb_poll:
            self._release_command_buffer(command_buffer)
class FanoutClient(MappingClient):
    """This works similar to the :class:`MappingClient` but instead of
    using the router to target hosts, it sends the commands to all manually
    specified hosts.

    The results are accumulated in a dictionary keyed by the `host_id`.

    For the parameters see :meth:`Cluster.fanout`.
    """

    def __init__(self, hosts, connection_pool, max_concurrency=None, auto_batch=True):
        MappingClient.__init__(
            self, connection_pool, max_concurrency, auto_batch=auto_batch
        )
        # List of host ids, or the string 'all'.
        self._target_hosts = hosts
        # Name-mangled flags: guards against chained .target() calls and
        # controls single-host promise unwrapping (see target_key).
        self.__is_retargeted = False
        self.__resolve_singular_result = False

    def target(self, hosts):
        """Temporarily retarget the client for one call.  This is useful
        when having to deal with a subset of hosts for one call.
        """
        if self.__is_retargeted:
            raise TypeError("Cannot use target more than once.")
        rv = FanoutClient(
            hosts,
            connection_pool=self.connection_pool,
            max_concurrency=self._max_concurrency,
            # Bug fix: previously auto_batch was not forwarded, so a
            # client created with auto_batch=False silently reverted to
            # batching after retargeting.
            auto_batch=self.auto_batch,
        )
        rv._cb_poll = self._cb_poll
        rv.__is_retargeted = True
        return rv

    def target_key(self, key):
        """Temporarily retarget the client for one call to route
        specifically to the one host that the given key routes to.  In
        that case the result on the promise is just the one host's value
        instead of a dictionary.

        .. versionadded:: 1.3
        """
        router = self.connection_pool.cluster.get_router()
        host_id = router.get_host_for_key(key)
        rv = self.target([host_id])
        rv.__resolve_singular_result = True
        return rv

    def execute_command(self, *args, **options):
        """Enqueues the command on every targeted host and returns a
        promise over a ``host_id -> result`` dict (or the single host's
        result after :meth:`target_key`).
        """
        promises = {}

        hosts = self._target_hosts
        if hosts == "all":
            hosts = list(self.connection_pool.cluster.hosts.keys())
        elif hosts is None:
            raise RuntimeError("Fanout client was not targeted to hosts.")

        for host_id in hosts:
            buf = self._get_command_buffer(host_id, args[0])
            promise = buf.enqueue_command(args[0], args[1:], options)
            if self.__resolve_singular_result and len(hosts) == 1:
                return promise
            promises[host_id] = promise

        return Promise.all(promises)
class RoutingClient(RoutingBaseClient):
    """A client that can route to individual targets.

    For the parameters see :meth:`Cluster.get_routing_client`.
    """

    def __init__(self, cluster, auto_batch=True):
        RoutingBaseClient.__init__(
            self, connection_pool=RoutingPool(cluster), auto_batch=auto_batch
        )

    # Standard redis methods

    def execute_command(self, *args, **options):
        """Routes a single command to its host and executes it
        synchronously, retrying once after a disconnect (timeouts only
        retry when the connection enables ``retry_on_timeout``).
        """
        pool = self.connection_pool
        command_name = args[0]
        command_args = args[1:]
        router = self.connection_pool.cluster.get_router()
        host_id = router.get_host_for_command(command_name, command_args)
        connection = pool.get_connection(command_name, shard_hint=host_id)
        try:
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        except (ConnectionError, TimeoutError) as e:
            connection.disconnect()
            # Plain connection errors always get one retry; timeouts
            # only when the connection opts in.
            if not connection.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        finally:
            pool.release(connection)

    # Custom Public API

    def get_mapping_client(self, max_concurrency=64, auto_batch=None):
        """Returns a thread unsafe mapping client.  This client works
        similar to a redis pipeline and returns eventual result objects.
        It needs to be joined on to work properly.  Instead of using this
        directly you should use the :meth:`map` context manager which
        automatically joins.

        Returns an instance of :class:`MappingClient`.
        """
        if auto_batch is None:
            auto_batch = self.auto_batch
        return MappingClient(
            connection_pool=self.connection_pool,
            max_concurrency=max_concurrency,
            auto_batch=auto_batch,
        )

    def get_fanout_client(self, hosts, max_concurrency=64, auto_batch=None):
        """Returns a thread unsafe fanout client.

        Returns an instance of :class:`FanoutClient`.
        """
        if auto_batch is None:
            auto_batch = self.auto_batch
        return FanoutClient(
            hosts,
            connection_pool=self.connection_pool,
            max_concurrency=max_concurrency,
            auto_batch=auto_batch,
        )

    def map(self, timeout=None, max_concurrency=64, auto_batch=None):
        """Returns a context manager for a map operation.  This runs
        multiple queries in parallel and then joins in the end to collect
        all results.

        In the context manager the client available is a
        :class:`MappingClient`.  Example usage::

            results = {}
            with cluster.map() as client:
                for key in keys_to_fetch:
                    results[key] = client.get(key)
            for key, promise in results.iteritems():
                print '%s => %s' % (key, promise.value)
        """
        return MapManager(
            self.get_mapping_client(max_concurrency, auto_batch), timeout=timeout
        )

    def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_batch=None):
        """Returns a context manager for a map operation that fans out to
        manually specified hosts instead of using the routing system.  This
        can for instance be used to empty the database on all hosts.  The
        context manager returns a :class:`FanoutClient`.  Example usage::

            with cluster.fanout(hosts=[0, 1, 2, 3]) as client:
                results = client.info()
            for host_id, info in results.value.iteritems():
                print '%s -> %s' % (host_id, info['is'])

        The promise returned accumulates all results in a dictionary keyed
        by the `host_id`.

        The `hosts` parameter is a list of `host_id` values or alternatively
        the string ``'all'`` to send the commands to all hosts.

        The fanout API needs to be used with a lot of care as it can cause
        a lot of damage when keys are written to hosts that do not expect
        them.
        """
        return MapManager(
            self.get_fanout_client(hosts, max_concurrency, auto_batch), timeout=timeout
        )
class LocalClient(BaseClient):
    """The local client is just a convenient method to target one specific
    host.  A connection pool for that host is mandatory.
    """

    def __init__(self, connection_pool=None, **kwargs):
        if connection_pool is None:
            raise TypeError("The local client needs a connection pool")
        BaseClient.__init__(self, connection_pool=connection_pool, **kwargs)
class MapManager(object):
    """Context manager that hands out a mapping client on entry and
    joins it on a clean exit (or cancels it when an exception escaped
    the block)."""

    def __init__(self, mapping_client, timeout):
        self.mapping_client = mapping_client
        self.timeout = timeout
        self.entered = None

    def __enter__(self):
        self.entered = time.time()
        return self.mapping_client

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is not None:
            # An error escaped the block: drop all outstanding requests.
            self.mapping_client.cancel()
            return
        remaining = self.timeout
        if remaining is not None:
            # Deduct time already spent inside the block, but always
            # give the join at least one second.
            remaining = max(1, remaining - (time.time() - self.entered))
        self.mapping_client.join(timeout=remaining)
================================================
FILE: rb/cluster.py
================================================
from redis.connection import ConnectionPool, UnixDomainSocketConnection
try:
from redis.commands.core import Script # redis>=5
except ImportError:
from redis.client import Script # redis<5
try:
from redis.connection import SSLConnection
except ImportError:
SSLConnection = None
import functools
from hashlib import sha1
from threading import Lock
from rb.router import PartitionRouter
from rb.clients import RoutingClient, LocalClient
from rb.utils import integer_types, iteritems, itervalues
class HostInfo(object):
    """Describes one redis host in the cluster: identity, address and
    connection options.  Equality and hashing are based solely on the
    ``host_id``.
    """

    def __init__(
        self,
        host_id,
        host,
        port,
        unix_socket_path=None,
        db=0,
        password=None,
        ssl=False,
        ssl_options=None,
    ):
        self.host_id = host_id
        self.host = host
        self.unix_socket_path = unix_socket_path
        self.port = port
        self.db = db
        self.password = password
        self.ssl = ssl
        self.ssl_options = ssl_options

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.host_id == other.host_id

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return self.host_id

    def __repr__(self):
        attrs = " ".join("%s=%r" % item for item in sorted(self.__dict__.items()))
        return "<%s %s>" % (self.__class__.__name__, attrs)
def _iter_hosts(iterable):
    """Normalizes host configuration input into a stream of config dicts.

    Accepts a mapping of ``host_id -> config``, an iterable of
    ``(host_id, config)`` tuples, or an iterable of plain config dicts
    (yielded unchanged).  Tuple configs are copied before the host id
    is injected so callers' dicts are never mutated.
    """
    if isinstance(iterable, dict):
        iterable = iteritems(iterable)
    for entry in iterable:
        if isinstance(entry, tuple):
            host_id, config = entry
            config = dict(config)
            config["host_id"] = host_id
        else:
            config = entry
        yield config
class Cluster(object):
    """The cluster is the core object behind rb.  It holds the connection
    pools to the individual nodes and can be shared for the duration of
    the application in a central location.

    Basic example of a cluster over four redis instances with the default
    router::

        cluster = Cluster(hosts={
            0: {'port': 6379},
            1: {'port': 6380},
            2: {'port': 6381},
            3: {'port': 6382},
        }, host_defaults={
            'host': '127.0.0.1',
        })

    `hosts` is a dictionary of hosts which maps the number host IDs to
    configuration parameters.  The parameters correspond to the signature
    of the :meth:`add_host` function.  The defaults for these parameters
    are pulled from `host_defaults`.  To override the pool class the
    `pool_cls` and `pool_options` parameters can be used.  The same
    applies to `router_cls` and `router_options` for the router.  The pool
    options are useful for setting socket timeouts and similar parameters.
    """

    def __init__(
        self,
        hosts,
        host_defaults=None,
        pool_cls=None,
        pool_options=None,
        router_cls=None,
        router_options=None,
    ):
        if pool_cls is None:
            pool_cls = ConnectionPool
        if router_cls is None:
            router_cls = PartitionRouter

        self._lock = Lock()
        self.pool_cls = pool_cls
        self.pool_options = pool_options
        self.router_cls = router_cls
        self.router_options = router_options
        # host_id -> ConnectionPool, created lazily by get_pool_for_host.
        self._pools = {}
        # Cached (router, age) pair; rebuilt when _hosts_age moves on.
        self._router = None
        # host_id -> HostInfo.
        self.hosts = {}
        # Bumped on every host add/remove to invalidate the cached router.
        self._hosts_age = 0
        self.host_defaults = host_defaults or {}

        for host_config in _iter_hosts(hosts):
            if self.host_defaults:
                for k, v in iteritems(self.host_defaults):
                    host_config.setdefault(k, v)
            self.add_host(**host_config)
def add_host(
    self,
    host_id=None,
    host="localhost",
    port=6379,
    unix_socket_path=None,
    db=0,
    password=None,
    ssl=False,
    ssl_options=None,
):
    """Adds a new host to the cluster.  This is only really useful for
    unittests as normally hosts are added through the constructor and
    changes after the cluster has been used for the first time are
    unlikely to make sense.

    Raises ``RuntimeError`` when no host id is given, ``ValueError``
    for a non-integer id and ``TypeError`` for a duplicate id.
    """
    if host_id is None:
        raise RuntimeError("Host ID is required")
    elif not isinstance(host_id, integer_types):
        raise ValueError("The host ID has to be an integer")
    host_id = int(host_id)

    with self._lock:
        if host_id in self.hosts:
            raise TypeError("Two hosts share the same host id (%r)" % (host_id,))
        self.hosts[host_id] = HostInfo(
            host_id=host_id,
            host=host,
            port=port,
            db=db,
            unix_socket_path=unix_socket_path,
            password=password,
            ssl=ssl,
            ssl_options=ssl_options,
        )
        # Invalidate the cached router.
        self._hosts_age += 1
def remove_host(self, host_id):
"""Removes a host from the client. This only really useful for
unittests.
"""
with self._lock:
rv = self._hosts.pop(host_id, None) is not None
pool = self._pools.pop(host_id, None)
if pool is not None:
pool.disconnect()
self._hosts_age += 1
return rv
def disconnect_pools(self):
    """Disconnects all connections from the internal pools."""
    with self._lock:
        for pool in itervalues(self._pools):
            pool.disconnect()
        # Drop the pools so they get rebuilt lazily on next use.
        self._pools.clear()
def get_router(self):
    """Returns the router for the cluster.  If the cluster reconfigures
    the router will be recreated.  Usually you do not need to interface
    with the router yourself as the cluster's routing client does that
    automatically.

    This returns an instance of :class:`BaseRouter`.
    """
    # Fast path without the lock: reuse the cached router while the
    # host table is unchanged (the age counter is bumped on every
    # add_host/remove_host).
    cached_router = self._router
    ref_age = self._hosts_age

    if cached_router is not None:
        router, router_age = cached_router
        if router_age == ref_age:
            return router

    with self._lock:
        router = self.router_cls(self, **(self.router_options or {}))
        self._router = (router, ref_age)
        return router
def get_pool_for_host(self, host_id):
    """Returns the connection pool for the given host.

    This connection pool is used by the redis clients to make sure
    that it does not have to reconnect constantly.  If you want to use
    a custom redis client you can pass this in as connection pool
    manually.

    Accepts either a host id or a :class:`HostInfo` instance.  Raises
    ``LookupError`` for unknown host ids.
    """
    if isinstance(host_id, HostInfo):
        host_info = host_id
        host_id = host_info.host_id
    else:
        host_info = self.hosts.get(host_id)
        if host_info is None:
            raise LookupError("Host %r does not exist" % (host_id,))

    # Fast path without the lock.
    rv = self._pools.get(host_id)
    if rv is not None:
        return rv
    with self._lock:
        # Re-check under the lock: another thread may have created it.
        rv = self._pools.get(host_id)
        if rv is None:
            opts = dict(self.pool_options or ())
            opts["db"] = host_info.db
            opts["password"] = host_info.password
            if host_info.unix_socket_path is not None:
                opts["path"] = host_info.unix_socket_path
                opts["connection_class"] = UnixDomainSocketConnection
                if host_info.ssl:
                    raise TypeError(
                        "SSL is not supported for unix " "domain sockets."
                    )
            else:
                opts["host"] = host_info.host
                opts["port"] = host_info.port
                if host_info.ssl:
                    if SSLConnection is None:
                        raise TypeError(
                            "This version of py-redis does "
                            "not support SSL connections."
                        )
                    opts["connection_class"] = SSLConnection
                    # redis-py expects ssl options with an ssl_ prefix.
                    opts.update(
                        ("ssl_" + k, v)
                        for k, v in iteritems(host_info.ssl_options or {})
                    )
            rv = self.pool_cls(**opts)
            self._pools[host_id] = rv
        return rv
def get_local_client(self, host_id):
    """Returns a localized client for a specific host ID.  This client
    works like a regular Python redis client and returns results
    immediately.
    """
    pool = self.get_pool_for_host(host_id)
    return LocalClient(connection_pool=pool)
def get_local_client_for_key(self, key):
    """Similar to :meth:`get_local_client` but returns the
    client based on what the router says the key destination is.
    """
    return self.get_local_client(self.get_router().get_host_for_key(key))
def get_routing_client(self, auto_batch=True):
    """Returns a routing client.  This client is able to automatically
    route the requests to the individual hosts.  It's thread safe and
    can be used similar to the host local client but it will refuse
    to execute commands that cannot be directly routed to an
    individual node.

    The default behavior for the routing client is to attempt to batch
    eligible commands into batch versions thereof.  For instance multiple
    `GET` commands routed to the same node can end up merged into an
    `MGET` command.  This behavior can be disabled by setting `auto_batch`
    to `False`.  This can be useful for debugging because `MONITOR` will
    more accurately reflect the commands issued in code.

    See :class:`RoutingClient` for more information.
    """
    return RoutingClient(self, auto_batch=auto_batch)
def map(self, timeout=None, max_concurrency=64, auto_batch=True):
    """Shortcut context manager for getting a routing client, beginning
    a map operation and joining over the result.  `max_concurrency`
    defines how many outstanding parallel queries can exist before an
    implicit join takes place.

    In the context manager the client available is a
    :class:`MappingClient`.  Example usage::

        results = {}
        with cluster.map() as client:
            for key in keys_to_fetch:
                results[key] = client.get(key)
        for key, promise in results.iteritems():
            print '%s => %s' % (key, promise.value)
    """
    return self.get_routing_client(auto_batch).map(
        timeout=timeout, max_concurrency=max_concurrency
    )
def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_batch=True):
    """Shortcut context manager for getting a routing client, beginning
    a fanout operation and joining over the result.

    In the context manager the client available is a
    :class:`FanoutClient`.  Example usage::

        with cluster.fanout(hosts='all') as client:
            client.flushdb()
    """
    return self.get_routing_client(auto_batch).fanout(
        hosts=hosts, timeout=timeout, max_concurrency=max_concurrency
    )
def all(self, timeout=None, max_concurrency=64, auto_batch=True):
    """Fanout to all hosts.  Works otherwise exactly like :meth:`fanout`.

    Example::

        with cluster.all() as client:
            client.flushdb()
    """
    return self.fanout(
        "all",
        timeout=timeout,
        max_concurrency=max_concurrency,
        auto_batch=auto_batch,
    )
def execute_commands(self, mapping, *args, **kwargs):
    """Concurrently executes a sequence of commands on a Redis cluster that
    are associated with a routing key, returning a new mapping where
    values are a list of results that correspond to the command in the same
    position.  For example::

        >>> cluster.execute_commands({
        ...     'foo': [
        ...         ('PING',),
        ...         ('TIME',),
        ...     ],
        ...     'bar': [
        ...         ('CLIENT', 'GETNAME'),
        ...     ],
        ... })
        {'bar': [<Promise None>],
         'foo': [<Promise True>, <Promise (1454446079, 418404)>]}

    Commands that are instances of :class:`redis.client.Script` will first
    be checked for their existence on the target nodes then loaded on the
    targets before executing and can be interleaved with other commands::

        >>> from redis.client import Script
        >>> TestScript = Script(None, 'return {KEYS, ARGV}')
        >>> cluster.execute_commands({
        ...     'foo': [
        ...         (TestScript, ('key:1', 'key:2'), range(0, 3)),
        ...     ],
        ...     'bar': [
        ...         (TestScript, ('key:3', 'key:4'), range(3, 6)),
        ...     ],
        ... })
        {'bar': [<Promise [['key:3', 'key:4'], ['3', '4', '5']]>],
         'foo': [<Promise [['key:1', 'key:2'], ['0', '1', '2']]>]}

    Internally, :class:`FanoutClient` is used for issuing commands.
    """

    def is_script_command(command):
        # A script invocation is spelled (Script, keys, args).
        return isinstance(command[0], Script)

    def check_script_load_result(script, result):
        # SCRIPT LOAD echoes back the SHA of the loaded body; anything
        # else means the wrong script got loaded.
        if script.sha != result:
            raise AssertionError(
                "Hash mismatch loading {!r}: expected {!r}, got {!r}".format(
                    script, script.sha, result,
                )
            )

    # Run through all the commands and check to see if there are any
    # scripts, and whether or not they have been loaded onto the target
    # hosts.
    exists = {}
    with self.fanout(*args, **kwargs) as client:
        for key, commands in mapping.items():
            targeted = client.target_key(key)
            for command in filter(is_script_command, commands):
                script = command[0]

                # Set the script hash if it hasn't already been set.
                if not script.sha:
                    script.sha = sha1(script.script.encode("utf-8")).hexdigest()

                # Check if the script has been loaded on each host that it
                # will be executed on.
                for host in targeted._target_hosts:
                    if script not in exists.setdefault(host, {}):
                        exists[host][script] = targeted.execute_command(
                            "SCRIPT EXISTS", script.sha
                        )

    # Execute the pending commands, loading scripts onto servers where they
    # do not already exist.
    results = {}
    with self.fanout(*args, **kwargs) as client:
        for key, commands in mapping.items():
            results[key] = []
            targeted = client.target_key(key)
            for command in commands:
                # If this command is a script, we need to check and see if
                # it needs to be loaded before execution.
                if is_script_command(command):
                    script = command[0]
                    for host in targeted._target_hosts:
                        if script in exists[host]:
                            result = exists[host].pop(script)
                            if not result.value[0]:
                                targeted.execute_command(
                                    "SCRIPT LOAD", script.script
                                ).done(
                                    on_success=functools.partial(
                                        check_script_load_result, script
                                    )
                                )
                    keys, arguments = command[1:]
                    parameters = list(keys) + list(arguments)
                    results[key].append(
                        targeted.execute_command(
                            "EVALSHA", script.sha, len(keys), *parameters
                        )
                    )
                else:
                    results[key].append(targeted.execute_command(*command))

    return results
================================================
FILE: rb/ketama.py
================================================
import hashlib
import math
from bisect import bisect
from rb.utils import text_type, integer_types, bytes_type
def md5_bytes(key):
    """Hashes *key* with MD5 and returns the 16-byte digest as a
    bytearray.  Text is UTF-8 encoded first; integers are hashed via
    their decimal string form; anything else is coerced to bytes.
    """
    if isinstance(key, text_type):
        raw = key.encode("utf-8")
    elif isinstance(key, integer_types):
        raw = text_type(key).encode("utf-8")
    else:
        raw = bytes_type(key)
    return bytearray(hashlib.md5(raw).digest())
class Ketama(object):
    """This class implements the Ketama consistent hashing algorithm.
    """

    def __init__(self, nodes=None, weights=None):
        self._nodes = set(nodes or [])
        # node -> weight; unlisted nodes default to weight 1.
        self._weights = weights if weights else {}
        self._rebuild_circle()

    def _rebuild_circle(self):
        """Updates the hash ring."""
        self._hashring = {}
        self._sorted_keys = []
        total_weight = 0
        for node in self._nodes:
            total_weight += self._weights.get(node, 1)

        for node in self._nodes:
            weight = self._weights.get(node, 1)
            # Number of md5 points for this node, proportional to its
            # share of the total weight (each point yields 4 ring keys).
            ks = math.floor((40 * len(self._nodes) * weight) / total_weight)

            for i in range(0, int(ks)):
                k = md5_bytes("%s-%s-salt" % (node, i))
                # Derive four 32-bit little-endian keys from the digest.
                for l in range(0, 4):
                    key = (
                        (k[3 + l * 4] << 24)
                        | (k[2 + l * 4] << 16)
                        | (k[1 + l * 4] << 8)
                        | k[l * 4]
                    )
                    self._hashring[key] = node
                    self._sorted_keys.append(key)

        self._sorted_keys.sort()

    def _get_node_pos(self, key):
        """Return node position(integer) for a given key or None."""
        if not self._hashring:
            return
        # Same little-endian key derivation as in _rebuild_circle (l=0).
        k = md5_bytes(key)
        key = (k[3] << 24) | (k[2] << 16) | (k[1] << 8) | k[0]
        nodes = self._sorted_keys
        pos = bisect(nodes, key)
        # Wrap around the ring when past the last point.
        if pos == len(nodes):
            return 0
        return pos

    def remove_node(self, node):
        """Removes node from circle and rebuild it."""
        try:
            self._nodes.remove(node)
            del self._weights[node]
        except (KeyError, ValueError):
            pass
        self._rebuild_circle()

    def add_node(self, node, weight=1):
        """Adds node to circle and rebuild it."""
        self._nodes.add(node)
        self._weights[node] = weight
        self._rebuild_circle()

    def get_node(self, key):
        """Return node for a given key. Else return None."""
        pos = self._get_node_pos(key)
        if pos is None:
            return None
        return self._hashring[self._sorted_keys[pos]]
================================================
FILE: rb/poll.py
================================================
import fcntl
import array
import select
import termios
class BasePoller(object):
    """Abstract base for the pollers: tracks registered objects by key
    and defines the polling interface implemented by subclasses.
    """

    is_available = False

    def __init__(self):
        self.objects = {}

    def register(self, key, f):
        self.objects[key] = f

    def unregister(self, key):
        # Returns the unregistered object or None when unknown.
        return self.objects.pop(key, None)

    def poll(self, timeout=None):
        raise NotImplementedError()

    def get(self, key):
        return self.objects.get(key)

    def __len__(self):
        return len(self.objects)

    def __iter__(self):
        # Make a copy when iterating so that modifications to this object
        # are possible while we're going over it.  Bug fix: on Python 3
        # ``dict.values()`` is a live view, so iterating it while
        # unregistering (as MappingClient.cancel does) raised
        # "dictionary changed size during iteration"; materialize first.
        return iter(list(self.objects.values()))
class SelectPoller(BasePoller):
    """Poller based on ``select.select`` -- the lowest common
    denominator available on every platform.
    """

    is_available = hasattr(select, "select")

    def poll(self, timeout=None):
        """Polls all registered objects and returns ``(object, event)``
        pairs with event being ``'read'`` or ``'write'``.
        """
        objs = list(self.objects.values())
        # Bug fix: the exceptional set was previously passed as an empty
        # list, which made the OOB-data check below unreachable; monitor
        # the same objects for exceptional conditions as intended.
        rlist, wlist, xlist = select.select(objs, objs, objs, timeout)
        if xlist:
            raise RuntimeError("Got unexpected OOB data")
        return [(x, "read") for x in rlist] + [(x, "write") for x in wlist]
class PollPoller(BasePoller):
    """Poller based on ``select.poll``."""

    is_available = hasattr(select, "poll")

    def __init__(self):
        BasePoller.__init__(self)
        self.pollobj = select.poll()
        # poll() reports file descriptors, so keep an fd -> object map.
        self.fd_to_object = {}

    def register(self, key, f):
        BasePoller.register(self, key, f)
        self.pollobj.register(
            f.fileno(), select.POLLIN | select.POLLOUT | select.POLLHUP
        )
        self.fd_to_object[f.fileno()] = f

    def unregister(self, key):
        rv = BasePoller.unregister(self, key)
        if rv is not None:
            self.pollobj.unregister(rv.fileno())
            self.fd_to_object.pop(rv.fileno(), None)
        return rv

    def poll(self, timeout=None):
        # NOTE(review): select.poll expects the timeout in milliseconds
        # while callers here appear to pass seconds -- confirm the
        # intended units.
        rv = []
        for fd, event in self.pollobj.poll(timeout):
            obj = self.fd_to_object[fd]
            # A single fd can yield several logical events per round.
            if event & select.POLLIN:
                rv.append((obj, "read"))
            if event & select.POLLOUT:
                rv.append((obj, "write"))
            if event & select.POLLHUP:
                rv.append((obj, "close"))
        return rv
class KQueuePoller(BasePoller):
    """Poller based on ``kqueue`` (BSD/macOS)."""

    is_available = hasattr(select, "kqueue")

    def __init__(self):
        BasePoller.__init__(self)
        self.kqueue = select.kqueue()
        # Changelist handed to kqueue.control on every poll call.
        self.events = []
        # kevents carry the fd in ``ident``; map it back to the object.
        self.event_to_object = {}

    def register(self, key, f):
        BasePoller.register(self, key, f)
        # Watch readability and writability via two separate kevents.
        r_event = select.kevent(
            f.fileno(),
            filter=select.KQ_FILTER_READ,
            flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,
        )
        self.events.append(r_event)
        w_event = select.kevent(
            f.fileno(),
            filter=select.KQ_FILTER_WRITE,
            flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,
        )
        self.events.append(w_event)
        self.event_to_object[f.fileno()] = f

    def unregister(self, key):
        rv = BasePoller.unregister(self, key)
        if rv is not None:
            fd = rv.fileno()
            # Drop both kevents (read and write) for this fd.
            self.events = [x for x in self.events if x.ident != fd]
            self.event_to_object.pop(fd, None)
        return rv

    def poll(self, timeout=None):
        events = self.kqueue.control(self.events, 128, timeout)
        rv = []
        for ev in events:
            obj = self.event_to_object.get(ev.ident)
            if obj is None:
                # It happens surprisingly frequently that kqueue returns
                # write events for things no longer in the kqueue.  Not
                # sure why.
                continue
            if ev.filter == select.KQ_FILTER_READ:
                rv.append((obj, "read"))
            elif ev.filter == select.KQ_FILTER_WRITE:
                rv.append((obj, "write"))
            # EOF can accompany either filter; report it as a close.
            if ev.flags & select.KQ_EV_EOF:
                rv.append((obj, "close"))
        return rv
class EpollPoller(BasePoller):
    """Poller based on Linux ``epoll``."""

    is_available = hasattr(select, "epoll")

    def __init__(self):
        BasePoller.__init__(self)
        self.epoll = select.epoll()
        # epoll reports file descriptors, so keep an fd -> object map.
        self.fd_to_object = {}

    def register(self, key, f):
        BasePoller.register(self, key, f)
        fd = f.fileno()
        self.epoll.register(fd, select.EPOLLIN | select.EPOLLHUP | select.EPOLLOUT)
        self.fd_to_object[fd] = f

    def unregister(self, key):
        rv = BasePoller.unregister(self, key)
        if rv is not None:
            fd = rv.fileno()
            self.epoll.unregister(fd)
            self.fd_to_object.pop(fd, None)
        return rv

    def poll(self, timeout=None):
        # epoll uses -1 (not None) to mean "block indefinitely".
        results = []
        for fd, mask in self.epoll.poll(-1 if timeout is None else timeout):
            obj = self.fd_to_object[fd]
            if mask & select.EPOLLIN:
                results.append((obj, "read"))
            if mask & select.EPOLLOUT:
                results.append((obj, "write"))
            if mask & select.EPOLLHUP:
                results.append((obj, "close"))
        return results
def _is_closed_select(f):
rlist, wlist, _ = select.select([f], [f], [], 0.0)
if not rlist and not wlist:
return False
buf = array.array("i", [0])
fcntl.ioctl(f.fileno(), termios.FIONREAD, buf)
return buf[0] == 0
def _is_closed_poll(f):
poll = select.poll()
poll.register(f.fileno(), select.POLLHUP)
for _, event in poll.poll(0.0):
if event == "close":
return True
return False
def _is_closed_kqueue(f):
    """Closed-check using ``kqueue`` (BSD/macOS): the peer has hung up
    when the read filter reports EOF.
    """
    kqueue = select.kqueue()
    event = select.kevent(
        f.fileno(),
        filter=select.KQ_FILTER_READ,
        flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE,
    )
    # Non-blocking control call: registers the event and collects any
    # that are already pending.
    for event in kqueue.control([event], 128, 0.0):
        if event.flags & select.KQ_EV_EOF:
            return True
    return False
def is_closed(f):
    """Dispatch to the best closed-ness probe available on this platform,
    preferring kqueue, then poll, then plain select.
    """
    if KQueuePoller.is_available:
        return _is_closed_kqueue(f)
    elif PollPoller.is_available:
        return _is_closed_poll(f)
    return _is_closed_select(f)
# Poller implementations in order of preference; the module-level ``poll``
# alias points at the best one available on this platform.
available_pollers = [
    impl
    for impl in [KQueuePoller, PollPoller, EpollPoller, SelectPoller]
    if impl.is_available
]
poll = available_pollers[0]
================================================
FILE: rb/promise.py
================================================
from functools import partial
from rb.utils import iteritems
class Promise(object):
    """A promise object that attempts to mirror the ES6 APIs for promise
    objects.  Unlike ES6 promises this one however also directly gives
    access to the underlying value and it has some slightly different
    static method names as this promise can be resolved externally.
    """

    __slots__ = ("value", "reason", "_state", "_callbacks", "_errbacks")

    def __init__(self):
        #: the value that this promise holds if it's resolved.
        self.value = None
        #: the reason for this promise if it's rejected.
        self.reason = None
        # one of "pending", "resolved", "rejected"
        self._state = "pending"
        self._callbacks = []
        self._errbacks = []

    @staticmethod
    def resolved(value):
        """Creates a promise object resolved with a certain value."""
        p = Promise()
        p._state = "resolved"
        p.value = value
        return p

    @staticmethod
    def rejected(reason):
        """Creates a promise object rejected with a certain value."""
        p = Promise()
        p._state = "rejected"
        p.reason = reason
        return p

    @staticmethod
    def all(iterable_or_dict):
        """A promise that resolves when all passed promises resolve.  You
        can either pass a list or a dictionary of promises.
        """
        if isinstance(iterable_or_dict, dict):
            return _promise_from_dict(iterable_or_dict)
        return _promise_from_iterable(iterable_or_dict)

    def resolve(self, value):
        """Resolves the promise with the given value.

        If ``value`` is itself a promise, this promise adopts its eventual
        state instead of resolving immediately.
        """
        if self is value:
            raise TypeError("Cannot resolve promise with itself.")
        if isinstance(value, Promise):
            # chain: adopt the other promise's outcome when it settles
            value.done(self.resolve, self.reject)
            return
        if self._state != "pending":
            raise RuntimeError("Promise is no longer pending.")
        self.value = value
        self._state = "resolved"
        callbacks = self._callbacks
        self._callbacks = None
        for callback in callbacks:
            callback(value)

    def reject(self, reason):
        """Rejects the promise with the given reason."""
        if self._state != "pending":
            raise RuntimeError("Promise is no longer pending.")
        self.reason = reason
        self._state = "rejected"
        errbacks = self._errbacks
        self._errbacks = None
        for errback in errbacks:
            errback(reason)

    @property
    def is_pending(self):
        """`True` if the promise is still pending, `False` otherwise."""
        return self._state == "pending"

    @property
    def is_resolved(self):
        """`True` if the promise was resolved, `False` otherwise."""
        return self._state == "resolved"

    @property
    def is_rejected(self):
        """`True` if the promise was rejected, `False` otherwise."""
        return self._state == "rejected"

    def done(self, on_success=None, on_failure=None):
        """Attaches some callbacks to the promise and returns the promise.

        Callbacks attached to an already-settled promise fire immediately.
        """
        if on_success is not None:
            if self._state == "pending":
                self._callbacks.append(on_success)
            elif self._state == "resolved":
                on_success(self.value)
        if on_failure is not None:
            if self._state == "pending":
                self._errbacks.append(on_failure)
            elif self._state == "rejected":
                on_failure(self.reason)
        return self

    def then(self, success=None, failure=None):
        """A utility method to add success and/or failure callback to the
        promise which will also return another promise in the process.

        Bugfix: a missing handler now forwards the value (or re-raises the
        reason) to the derived promise like ES6 ``then`` does, instead of
        calling ``None(...)`` and rejecting with a ``TypeError``.
        """
        rv = Promise()

        def on_success(v):
            if success is None:
                # no handler: pass the value through unchanged
                rv.resolve(v)
                return
            try:
                rv.resolve(success(v))
            except Exception as e:
                rv.reject(e)

        def on_failure(r):
            if failure is None:
                # no handler: propagate the original rejection reason
                rv.reject(r)
                return
            try:
                rv.resolve(failure(r))
            except Exception as e:
                rv.reject(e)

        self.done(on_success, on_failure)
        return rv

    def __repr__(self):
        if self._state == "pending":
            v = "(pending)"
        elif self._state == "rejected":
            v = repr(self.reason) + " (rejected)"
        else:
            v = repr(self.value)
        return "<%s %s>" % (self.__class__.__name__, v,)
def _ensure_promise(value):
    """Coerce ``value`` into a promise, wrapping plain values into an
    already-resolved :class:`Promise`.
    """
    if isinstance(value, Promise):
        return value
    return Promise.resolved(value)
def _promise_from_iterable(iterable):
    """Aggregate an iterable of promises (or plain values) into one promise
    that resolves with the list of all values in order, or rejects with the
    first rejection reason.
    """
    promises = [_ensure_promise(item) for item in iterable]
    if not promises:
        return Promise.resolved([])
    outstanding = set(promises)
    rv = Promise()

    def on_success(promise, value):
        outstanding.discard(promise)
        if not outstanding:
            # everything settled successfully; collect values in order
            rv.resolve([p.value for p in promises])

    for promise in promises:
        promise.done(partial(on_success, promise), rv.reject)
    return rv
def _promise_from_dict(d):
    """Aggregate a dict of promises (or plain values) into one promise that
    resolves with a dict of all values, or rejects with the first rejection
    reason.
    """
    coerced = dict((k, _ensure_promise(v)) for k, v in iteritems(d))
    if not coerced:
        return Promise.resolved({})
    outstanding = set(coerced.keys())
    rv = Promise()

    def on_success(key, value):
        outstanding.discard(key)
        if not outstanding:
            rv.resolve(dict((k, p.value) for k, p in iteritems(coerced)))

    for key, promise in iteritems(coerced):
        promise.done(partial(on_success, key), rv.reject)
    return rv
================================================
FILE: rb/router.py
================================================
from weakref import ref as weakref
from rb.ketama import Ketama
from rb.utils import text_type, bytes_type, integer_types, crc32
from rb._rediscommands import COMMANDS
class UnroutableCommand(Exception):
    """Raised if a command was issued that cannot be routed through the
    router to a single host (e.g. an unknown command, a command with
    movable keys, or one that operates on zero or multiple keys).
    """
class BadHostSetup(Exception):
    """Raised if the cluster's host setup is not compatible with the
    router (e.g. host IDs are not gapless starting at zero).
    """
def extract_keys(args, key_spec):
    """Return the key arguments out of ``args`` according to a redis
    command key spec ``(first, last, step)``.

    Positions are 1-indexed; a negative ``last`` means "until the end of
    the argument list".
    """
    first, last, step = key_spec
    keys = []
    for position, arg in enumerate(args, 1):
        if 0 <= last < position:
            break
        if position >= first and (position - first) % step == 0:
            keys.append(arg)
    return keys
def assert_gapless_hosts(hosts):
    """Validate that ``hosts`` maps the IDs ``0 .. N-1`` without gaps and
    is non-empty; raises :exc:`BadHostSetup` otherwise.
    """
    if not hosts:
        raise BadHostSetup("No hosts were configured.")
    for host_id in range(len(hosts)):
        if hosts.get(host_id) is None:
            raise BadHostSetup(
                'Expected host with ID "%d" but no such host was found.' % host_id
            )
class BaseRouter(object):
    """Baseclass for all routers.  If you want to implement a custom router
    this is what you subclass.
    """

    def __init__(self, cluster):
        # this is a weakref because the router is cached on the cluster
        # and otherwise we end up in circular reference land and we are
        # having problems being garbage collected.
        self._cluster = weakref(cluster)

    @property
    def cluster(self):
        """Reference back to the :class:`Cluster` this router belongs to."""
        rv = self._cluster()
        if rv is None:
            raise RuntimeError("Cluster went away")
        return rv

    def get_key(self, command, args):
        """Returns the key a command operates on.

        Raises :exc:`UnroutableCommand` if the command is unknown, uses
        movable keys, or operates on zero or multiple keys.

        Bugfix: two of the error messages below contained ``"%r"``
        placeholders without the ``% command`` argument applied, so users
        saw a literal ``%r`` instead of the command name.
        """
        spec = COMMANDS.get(command.upper())
        if spec is None:
            raise UnroutableCommand(
                'The command "%r" is unknown to the '
                "router and cannot be handled as a "
                "result." % command
            )
        if "movablekeys" in spec["flags"]:
            raise UnroutableCommand(
                'The keys for "%r" are movable and '
                "as such cannot be routed to a single "
                "host." % command
            )
        keys = extract_keys(args, spec["key_spec"])
        if len(keys) == 1:
            return keys[0]
        elif not keys:
            raise UnroutableCommand(
                'The command "%r" does not operate on a key which means '
                "that no suitable host could be determined. Consider "
                "using a fanout instead." % command
            )
        raise UnroutableCommand(
            'The command "%r" operates on multiple keys (%d passed) which is '
            "something that is not supported." % (command, len(keys))
        )

    def get_host_for_command(self, command, args):
        """Returns the host this command should be executed against."""
        return self.get_host_for_key(self.get_key(command, args))

    def get_host_for_key(self, key):
        """Perform routing and return host_id of the target.

        Subclasses need to implement this.
        """
        raise NotImplementedError()
class ConsistentHashingRouter(BaseRouter):
    """Router that returns the host_id based on a consistent hashing
    algorithm.  The consistent hashing algorithm only works if a key
    argument is provided.

    This router requires that the hosts are gapless which means that
    the IDs for N hosts range from 0 to N-1.
    """

    def __init__(self, cluster):
        BaseRouter.__init__(self, cluster)
        # snapshot of host_id -> host info used to seed the hash ring
        self._host_id_id_map = dict(self.cluster.hosts.items())
        self._hash = Ketama(self._host_id_id_map.values())
        assert_gapless_hosts(self.cluster.hosts)

    def get_host_for_key(self, key):
        """Look the key up on the ketama ring."""
        host = self._hash.get_node(key)
        if host is None:
            raise UnroutableCommand("Did not find a suitable host for the key.")
        return host
class PartitionRouter(BaseRouter):
    """A straightforward router that just individually routes commands to
    single nodes based on a simple ``crc32 % node_count`` setup.

    This router requires that the hosts are gapless which means that
    the IDs for N hosts range from 0 to N-1.
    """

    def __init__(self, cluster):
        BaseRouter.__init__(self, cluster)
        assert_gapless_hosts(self.cluster.hosts)

    def get_host_for_key(self, key):
        """Hash the key (normalized to bytes) and mod by the host count."""
        if isinstance(key, text_type):
            raw = key.encode("utf-8")
        elif isinstance(key, integer_types):
            raw = text_type(key).encode("utf-8")
        else:
            raw = bytes_type(key)
        return crc32(raw) % len(self.cluster.hosts)
================================================
FILE: rb/testing.py
================================================
import os
import time
import uuid
import shutil
import socket
import tempfile
from contextlib import contextmanager
from subprocess import Popen, PIPE
from rb.cluster import Cluster
from rb.utils import itervalues
# Shared sink for spawned redis-server stdout; kept open for the lifetime
# of the module so every spawned process can reuse it.
devnull = open(os.devnull, "r+")
class Server(object):
    """Handle for a spawned ``redis-server`` process listening on a unix
    socket.  ``cl`` is the :class:`subprocess.Popen` handle (set to
    ``None`` once the process has been reaped).
    """

    def __init__(self, cl, socket_path):
        self._cl = cl
        self.socket_path = socket_path

    def test_connection(self):
        """Return ``True`` if the server accepts connections on its unix
        socket.

        Bugfix: the probe socket is now always closed; previously it
        leaked, and since this method is polled in a loop while waiting
        for servers to come up, that leaked file descriptors.
        """
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.connect(self.socket_path)
        except IOError:
            return False
        finally:
            s.close()
        return True

    def signal_stop(self):
        """Ask the server process to terminate (no-op once reaped)."""
        if self._cl is not None:
            self._cl.kill()

    def close(self):
        """Stop the process, wait for it, and remove the socket file."""
        if self._cl is not None:
            self.signal_stop()
            self._cl.wait()
            self._cl = None
        try:
            os.remove(self.socket_path)
        except OSError:
            pass
class TestSetup(object):
    """The test setup is a convenient way to spawn multiple redis servers
    for testing and to shut them down automatically.  This can be used as
    a context manager to automatically terminate the clients.
    """

    def __init__(self, servers=4, databases_each=8, server_executable="redis-server"):
        # scratch directory holding one unix socket per spawned server
        self._fd_dir = tempfile.mkdtemp()
        self.databases_each = databases_each
        self.server_executable = server_executable
        self.servers = []
        for server in range(servers):
            self.spawn_server()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    def make_cluster(self):
        """Creates a correctly configured cluster from the servers
        spawned. This also automatically waits for the servers to be up.
        """
        self.wait_for_servers()
        hosts = []
        host_id = 0
        # every (server process, db index) pair becomes its own cluster
        # host, so N servers with M databases yield N*M gapless host IDs
        for server in self.servers:
            for x in range(self.databases_each):
                hosts.append(
                    {
                        "host_id": host_id,
                        "unix_socket_path": server.socket_path,
                        "db": x,
                    }
                )
                host_id += 1
        return Cluster(
            hosts, pool_options={"encoding": "utf-8", "decode_responses": True}
        )

    def spawn_server(self):
        """Spawns a new server and adds it to the pool."""
        socket_path = os.path.join(self._fd_dir, str(uuid.uuid4()))
        # the trailing "-" makes redis-server read its config from stdin
        cl = Popen([self.server_executable, "-"], stdin=PIPE, stdout=devnull)
        cl.stdin.write(
            (
                """
port 0
unixsocket %(path)s
databases %(databases)d
save ""
"""
                % {"path": socket_path, "databases": self.databases_each,}
            ).encode("utf-8")
        )
        cl.stdin.flush()
        cl.stdin.close()
        self.servers.append(Server(cl, socket_path))

    def wait_for_servers(self, timeout=10):
        """Waits for all servers to be up and running.  Returns ``False``
        on timeout, ``True`` once every server accepted a connection.
        """
        unconnected_servers = dict((x.socket_path, x) for x in self.servers)
        now = time.time()
        while unconnected_servers:
            # restart iteration after each successful connection because
            # the dict is mutated while we walk it
            for server in itervalues(unconnected_servers):
                if server.test_connection():
                    unconnected_servers.pop(server.socket_path, None)
                    break
            if time.time() > now + timeout:
                return False
            if unconnected_servers:
                time.sleep(0.05)
        return True

    def close(self):
        """Closes the test setup which shuts down all redis servers."""
        # signal everything first so the servers shut down in parallel,
        # then reap them one by one
        for server in self.servers:
            server.signal_stop()
        for server in self.servers:
            server.close()
        try:
            shutil.rmtree(self._fd_dir)
        except (OSError, IOError):
            pass

    def __del__(self):
        # best-effort cleanup in case the setup was never closed explicitly
        try:
            self.close()
        except Exception:
            pass
@contextmanager
def make_test_cluster(*args, **kwargs):
    """Convenient shortcut for creating a test setup and then a cluster
    from it.  This must be used as a context manager::

        from rb.testing import make_test_cluster
        with make_test_cluster() as cluster:
            ...
    """
    setup = TestSetup(*args, **kwargs)
    try:
        cluster = setup.make_cluster()
        try:
            yield cluster
        finally:
            cluster.disconnect_pools()
    finally:
        # equivalent to TestSetup.__exit__, which just calls close()
        setup.close()
================================================
FILE: rb/utils.py
================================================
from __future__ import absolute_import
import sys
# Python 2/3 compatibility shims used throughout rb.
PY2 = sys.version_info[0] == 2

if PY2:
    integer_types = (int, long)  # noqa: F821
    text_type = unicode  # noqa: F821
    bytes_type = str

    def iteritems(d, **kw):
        return iter(d.iteritems(**kw))

    def itervalues(d, **kw):
        return iter(d.itervalues(**kw))

    from itertools import izip
    from binascii import crc32
else:
    integer_types = (int,)
    text_type = str
    bytes_type = bytes
    izip = zip

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    from binascii import crc32 as _crc32

    # Python 3's binascii.crc32 always returns an unsigned value while the
    # Python 2 implementation could return a negative (signed) one.  As
    # https://docs.python.org/3/library/binascii.html#binascii.crc32 notes,
    # the upstream advice is to mask with 0xffffffff for portability — but
    # values produced on Python 2 must keep matching, so we emit the signed
    # (two's complement) form instead.
    def crc32(*args):
        """Signed crc32 matching the Python 2 output."""
        value = _crc32(*args)
        if value & 0x80000000:
            # fold values >= 2**31 into the negative range
            value -= 0x100000000
        return value
================================================
FILE: scripts/bump-version.sh
================================================
#!/bin/bash
set -eu

# Run from the repository root regardless of invocation directory.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $SCRIPT_DIR/..

# OLD_VERSION is accepted (callers pass two arguments) but not used below.
OLD_VERSION="$1"
NEW_VERSION="$2"

# Rewrite the quoted __version__ assignment in rb/__init__.py in place.
sed -i -e "s/^__version__ = "'".*"'"\$/__version__ = "'"'"$NEW_VERSION"'"'"/" rb/__init__.py

echo "New version: $NEW_VERSION"
================================================
FILE: setup.cfg
================================================
[bdist_wheel]
universal = 1
================================================
FILE: setup.py
================================================
import re
import ast
import os
from setuptools import setup

# Extract __version__ from rb/__init__.py without importing the package.
_version_re = re.compile(r"__version__\s+=\s+(.*)")
with open("rb/__init__.py", "rb") as f:
    version = str(
        ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1))
    )
install_requires = ["redis>=2.6,!=3.4.0"]
# override the redis version constraint in install_requires if the
# REDIS_VERSION environment variable is set (the old comment mentioned
# Django by mistake)
REDIS_VERSION = os.environ.get('REDIS_VERSION')
if REDIS_VERSION:
    install_requires = [
        u'redis{}'.format(REDIS_VERSION)
        if r.startswith('redis>=') else r
        for r in install_requires
    ]
setup(
    name="rb",
    author="Functional Software Inc.",
    author_email="hello@getsentry.com",
    version=version,
    url="http://github.com/getsentry/rb",
    packages=["rb"],
    description="rb, the redis blaster",
    install_requires=install_requires,
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
    ],
)
================================================
FILE: tests/conftest.py
================================================
import pytest
from rb.testing import make_test_cluster
@pytest.fixture
def cluster(request):
    """Spawn a disposable redis test cluster for a test and tear it down
    afterwards (yield-style replacement for the addfinalizer pattern).
    """
    with make_test_cluster() as cluster:
        yield cluster
================================================
FILE: tests/test_cluster.py
================================================
import time
import pytest
import redis
from redis.exceptions import ResponseError
from rb.cluster import Cluster
from rb.router import UnroutableCommand
from rb.promise import Promise
from rb.utils import text_type
try:
from redis.commands.core import Script
except ImportError:
from redis.client import Script
def test_basic_interface():
    # host_defaults (password) must merge into every host entry while the
    # per-host overrides (db, host) are preserved
    cluster = Cluster(
        {0: {"db": 0}, 1: {"db": 2}, 2: {"db": 4, "host": "127.0.0.1"},},
        host_defaults={"password": "pass",},
        pool_options={"encoding": "utf-8", "decode_responses": True},
    )
    assert len(cluster.hosts) == 3
    assert cluster.hosts[0].host_id == 0
    assert cluster.hosts[0].db == 0
    assert cluster.hosts[0].host == "localhost"
    assert cluster.hosts[0].port == 6379
    assert cluster.hosts[0].password == "pass"
    assert cluster.hosts[1].host_id == 1
    assert cluster.hosts[1].db == 2
    assert cluster.hosts[1].host == "localhost"
    assert cluster.hosts[1].port == 6379
    assert cluster.hosts[1].password == "pass"
    assert cluster.hosts[2].host_id == 2
    assert cluster.hosts[2].db == 4
    assert cluster.hosts[2].host == "127.0.0.1"
    assert cluster.hosts[2].port == 6379
    assert cluster.hosts[2].password == "pass"


def test_router_access():
    # the router is cached on the cluster and invalidated by add_host
    cluster = Cluster(
        {0: {"db": 0},}, pool_options={"encoding": "utf-8", "decode_responses": True}
    )
    router = cluster.get_router()
    assert router.cluster is cluster
    assert cluster.get_router() is router
    cluster.add_host(1, {"db": 1})
    new_router = cluster.get_router()
    assert new_router is not router


def test_basic_cluster(cluster):
    # round-trip many keys through the auto-batching map client
    iterations = 10000
    with cluster.map() as client:
        for x in range(iterations):
            client.set("key-%06d" % x, x)
    responses = []
    with cluster.map() as client:
        for x in range(iterations):
            responses.append(client.get("key-%06d" % x))
    ref_sum = sum(int(x.value) for x in responses)
    assert ref_sum == sum(range(iterations))


def test_basic_cluster_disabled_batch(cluster):
    # same round-trip as test_basic_cluster but with auto_batch disabled
    iterations = 10000
    with cluster.map(auto_batch=False) as client:
        for x in range(iterations):
            client.set("key-%06d" % x, x)
    responses = []
    with cluster.map(auto_batch=False) as client:
        for x in range(iterations):
            responses.append(client.get("key-%06d" % x))
    ref_sum = sum(int(x.value) for x in responses)
    assert ref_sum == sum(range(iterations))
def make_zset_data(x):
    """Ten ``(member, score)`` pairs for scores ``x .. x+9``, with string
    members and float scores.
    """
    return [(str(value), float(value)) for value in range(x, x + 10)]
def test_simple_api(cluster):
    # write keys and zsets through a map client, then read them back both
    # synchronously and via promises
    client = cluster.get_routing_client()
    with client.map() as map_client:
        for x in range(10):
            map_client.set("key:%d" % x, x)
            if redis.VERSION >= (3, 0, 0):
                map_client.zadd("zset:%d" % x, dict(make_zset_data(x)))
            else:
                map_client.zadd("zset:%d" % x, **dict(make_zset_data(x)))
    for x in range(10):
        assert client.get("key:%d" % x) == str(x)
        assert client.zrange("zset:%d" % x, 0, -1, withscores=True) == make_zset_data(x)
    results = []  # (promise, expected result)
    with client.map() as map_client:
        for x in range(10):
            results.append(
                (
                    map_client.zrange("zset:%d" % x, 0, -1, withscores=True),
                    make_zset_data(x),
                )
            )
    for promise, expectation in results:
        assert promise.value == expectation
    with client.map() as map_client:
        for x in range(10):
            map_client.delete("key:%d" % x)
    for x in range(10):
        assert client.get("key:%d" % x) is None


def test_routing_client_releases_connection_on_error(cluster):
    # a failed command must still return its connection to the pool
    client = cluster.get_routing_client()
    with pytest.raises(ResponseError):
        client.sadd("key")
    host = cluster.get_router().get_host_for_command("sadd", ["key"])
    pool = cluster.get_pool_for_host(host)
    assert len(pool._available_connections) == pool._created_connections


def test_mapping_client_releases_connection_on_error(cluster):
    # the error surfaces on join(); connections still go back to the pool
    client = cluster.get_routing_client().get_mapping_client()
    client.sadd("key")
    with pytest.raises(ResponseError):
        client.join()
    host = cluster.get_router().get_host_for_command("sadd", ["key"])
    pool = cluster.get_pool_for_host(host)
    assert len(pool._available_connections) == pool._created_connections


def test_managed_mapping_client_releases_connection_on_error(cluster):
    # same as above but using the context-manager form of map()
    with pytest.raises(ResponseError):
        with cluster.get_routing_client().map() as client:
            client.sadd("key")
    host = cluster.get_router().get_host_for_command("sadd", ["key"])
    pool = cluster.get_pool_for_host(host)
    assert len(pool._available_connections) == pool._created_connections


def test_multi_keys_rejected(cluster):
    # commands addressing multiple keys cannot be routed to a single host
    client = cluster.get_routing_client()
    # Okay
    with client.map() as map_client:
        map_client.delete("key")
    # Not okay
    with client.map() as map_client:
        with pytest.raises(UnroutableCommand):
            map_client.delete("key1", "key2")


def test_promise_api(cluster):
    # then() callbacks fire once leaving the map block resolves the promises
    results = []
    with cluster.map() as client:
        for x in range(10):
            client.set("key-%d" % x, x)
        for x in range(10):
            client.get("key-%d" % x).then(lambda x: results.append(int(x)))
    assert sorted(results) == list(range(10))


def test_fanout_api(cluster):
    # seed each host locally, then read from all hosts with a fanout client
    for host_id in cluster.hosts:
        client = cluster.get_local_client(host_id)
        client.set("foo", str(host_id))
        if redis.VERSION >= (3, 0, 0):
            client.zadd("zset", dict(make_zset_data(host_id)))
        else:
            client.zadd("zset", **dict(make_zset_data(host_id)))
    with cluster.fanout(hosts="all") as client:
        get_result = client.get("foo")
        zrange_result = client.zrange("zset", 0, -1, withscores=True)
    for host_id in cluster.hosts:
        assert get_result.value[host_id] == str(host_id)
        assert zrange_result.value[host_id] == make_zset_data(host_id)


def test_fanout_key_target(cluster):
    # target_key() routes fanout commands to the key's host only
    with cluster.fanout() as client:
        c = client.target_key("foo")
        c.set("foo", "42")
        promise = c.get("foo")
    assert promise.value == "42"
    client = cluster.get_routing_client()
    assert client.get("foo") == "42"


def test_fanout_targeting_api(cluster):
    # explicit host targeting works; untargeted commands must raise
    with cluster.fanout() as client:
        client.target(hosts=[0, 1]).set("foo", 42)
        rv = client.target(hosts="all").get("foo")
    assert list(rv.value.values()).count("42") == 2
    # Without hosts this should fail
    with cluster.fanout() as client:
        pytest.raises(RuntimeError, client.get, "bar")


def test_emulated_batch_apis(cluster):
    # mget/mset are emulated on top of per-key commands by the map client
    with cluster.map() as map_client:
        promise = map_client.mset(dict(("key:%s" % x, x) for x in range(10)))
    assert promise.value is None
    with cluster.map() as map_client:
        promise = map_client.mget(["key:%s" % x for x in range(10)])
    assert promise.value == list(map(text_type, range(10)))


def test_batch_promise_all(cluster):
    # Promise.all aggregates heterogeneous command promises in order
    with cluster.map() as client:
        client.set("1", "a")
        client.set("2", "b")
        client.set("3", "c")
        client.set("4", "d")
        client.hset("a", "b", "XXX")
    with cluster.map() as client:
        rv = Promise.all(
            [client.mget("1", "2"), client.hget("a", "b"), client.mget("3", "4"),]
        )
    assert rv.value == [["a", "b"], "XXX", ["c", "d"]]


def test_execute_commands(cluster):
    TestScript = Script(cluster.get_local_client(0), "return {KEYS, ARGV}",)
    # XXX: redis<2.10.6 didn't require that a ``Script`` be instantiated with a
    # valid client as part of the constructor, which resulted in the SHA not
    # actually being set until the script was executed. To ensure the legacy
    # behavior still works, we manually unset the cached SHA before executing.
    actual_script_hash = TestScript.sha
    TestScript.sha = None
    results = cluster.execute_commands(
        {
            "foo": [
                ("SET", "foo", "1"),
                (TestScript, ("key",), ("value",)),
                ("GET", "foo"),
            ],
            "bar": [
                ("INCRBY", "bar", "2"),
                (TestScript, ("key",), ("value",)),
                ("GET", "bar"),
            ],
        }
    )
    assert TestScript.sha == actual_script_hash
    assert results["foo"][0].value
    assert results["foo"][1].value == [["key"], ["value"]]
    assert results["foo"][2].value == "1"
    assert results["bar"][0].value == 2
    assert results["bar"][1].value == [["key"], ["value"]]
    assert results["bar"][2].value == "2"


def test_reconnect(cluster):
    # force server-side idle timeouts, then verify clients reconnect
    with cluster.map() as client:
        for x in range(10):
            client.set(text_type(x), text_type(x))
    with cluster.all() as client:
        client.config_set("timeout", 1)
    time.sleep(2)
    with cluster.map() as client:
        rv = Promise.all([client.get(text_type(x)) for x in range(10)])
    assert rv.value == list(map(text_type, range(10)))
================================================
FILE: tests/test_ketama.py
================================================
from rb.ketama import Ketama
def test_basic():
    def test(k):
        # touch the ring with many keys, then return the nodes chosen for
        # four probe keys so the caller can assert placement
        data = {}
        for i in range(1000):
            tower = k.get_node("a%s" % i)
            data.setdefault(tower, 0)
            data[tower] += 1
        return [
            k.get_node("Apple"),
            k.get_node("Hello"),
            k.get_node("Data"),
            k.get_node("Computer"),
        ]

    k = Ketama(
        [
            "192.168.0.1:6000",
            "192.168.0.1:6001",
            "192.168.0.1:6002",
            "192.168.0.1:6003",
            "192.168.0.1:6004",
            "192.168.0.1:6005",
            "192.168.0.1:6006",
            "192.168.0.1:6008",
            "192.168.0.1:6007",
        ]
    )
    assert test(k) == [
        "192.168.0.1:6002",
        "192.168.0.1:6007",
        "192.168.0.1:6004",
        "192.168.0.1:6004",
    ]
    # removing a node only remaps the keys that hashed to it
    k.remove_node("192.168.0.1:6007")
    assert test(k) == [
        "192.168.0.1:6002",
        "192.168.0.1:6000",
        "192.168.0.1:6004",
        "192.168.0.1:6004",
    ]
    # adding it back restores the original placement
    k.add_node("192.168.0.1:6007")
    assert test(k) == [
        "192.168.0.1:6002",
        "192.168.0.1:6007",
        "192.168.0.1:6004",
        "192.168.0.1:6004",
    ]
================================================
FILE: tests/test_poll.py
================================================
import pytest
from rb import clients
from rb.poll import available_pollers
from rb.utils import text_type
@pytest.mark.parametrize(
    "poll", available_pollers, ids=[x.__name__ for x in available_pollers]
)
def test_simple_api(cluster, poll, monkeypatch):
    # run the basic map/get round-trip against every available poller
    # implementation by patching the one used by the client module
    monkeypatch.setattr(clients, "poll", poll)
    client = cluster.get_routing_client()
    with client.map() as map_client:
        for x in range(10):
            map_client.set("key:%s" % x, x)
    for x in range(10):
        assert client.get("key:%d" % x) == text_type(x)
================================================
FILE: tests/test_promise.py
================================================
from rb.promise import Promise
def test_resolved_promise():
    # a pre-resolved promise exposes its value immediately
    p = Promise.resolved(42)
    assert p.is_resolved
    assert not p.is_pending
    assert not p.is_rejected
    assert p.value == 42


def test_rejected_promise():
    # a pre-rejected promise exposes its reason immediately
    err = RuntimeError("So fail")
    p = Promise.rejected(err)
    assert not p.is_resolved
    assert not p.is_pending
    assert p.is_rejected
    assert p.reason == err


def test_success_callbacks():
    # callbacks fire on resolve; attaching to an already-resolved promise
    # fires synchronously
    results = []
    p = Promise()
    assert p.is_pending
    p.done(results.append)
    assert results == []
    p.resolve(42)
    assert results == [42]
    p = Promise.resolved(23)
    p.done(results.append)
    assert results == [42, 23]


def test_failure_callbacks():
    # same as test_success_callbacks but for the rejection path
    results = []
    p = Promise()
    assert p.is_pending
    p.done(on_failure=results.append)
    assert results == []
    p.reject(42)
    assert results == [42]
    p = Promise.rejected(23)
    p.done(on_failure=results.append)
    assert results == [42, 23]


def test_promise_then():
    # then() chains a derived promise holding the callback's return value
    p = Promise.resolved([1, 2, 3])

    def on_success(value):
        return value + [4]

    p2 = p.then(success=on_success)
    assert p2.value == [1, 2, 3, 4]


def test_promise_all():
    # Promise.all aggregates lists and dicts; one rejection rejects the lot
    p = Promise.all([])
    assert p.is_resolved
    assert p.value == []
    p = Promise.all({})
    assert p.is_resolved
    assert p.value == {}
    p = Promise.all([Promise.resolved(1), Promise.resolved(2), Promise.resolved(3),])
    assert p.is_resolved
    assert p.value == [1, 2, 3]
    p = Promise.all(
        {
            "key1": Promise.resolved(1),
            "key2": Promise.resolved(2),
            "key3": Promise.resolved(3),
        }
    )
    assert p.is_resolved
    assert p.value == {"key1": 1, "key2": 2, "key3": 3}
    p = Promise.all([Promise.resolved(1), Promise.rejected(2), Promise.resolved(3),])
    assert p.is_rejected
    assert p.reason == 2


def test_auto_coercion():
    # plain values passed to Promise.all get wrapped in resolved promises
    p = Promise.all([1, 2, Promise.resolved(3)])
    assert p.is_resolved
    assert p.value == [1, 2, 3]
    p = Promise.all({1: 1, 2: 2, 3: Promise.resolved(3)})
    assert p.is_resolved
    assert p.value == {1: 1, 2: 2, 3: 3}
================================================
FILE: tests/test_router.py
================================================
import pytest
from rb.cluster import Cluster
from rb.router import UnroutableCommand, extract_keys, BadHostSetup
def test_router_key_routing():
    # single-key commands yield their key; multi-key/unknown commands raise
    cluster = Cluster({0: {"db": 0},})
    router = cluster.get_router()
    assert router.get_key("INCR", ["foo"]) == "foo"
    assert router.get_key("GET", ["bar"]) == "bar"
    with pytest.raises(UnroutableCommand):
        router.get_key("MGET", ["foo", "bar", "baz"])
    with pytest.raises(UnroutableCommand):
        router.get_key("UNKNOWN", [])


def test_host_validation():
    # host IDs must be gapless starting at 0; here ID 0 is missing
    cluster = Cluster(hosts={1: {}})
    try:
        cluster.get_router()
    except BadHostSetup as e:
        assert 'Expected host with ID "0"' in str(e)
    else:
        raise Exception("Expected runtime error")


def test_router_basics():
    # routing is deterministic for both full commands and bare keys
    cluster = Cluster({0: {"db": 0}, 1: {"db": 1}, 2: {"db": 2},})
    router = cluster.get_router()
    assert router.get_host_for_command("INCR", ["foo"]) == 1
    assert router.get_host_for_command("INCR", ["bar"]) == 2
    assert router.get_host_for_command("INCR", ["baz"]) == 0
    assert router.get_host_for_key("foo") == 1
    assert router.get_host_for_key("bar") == 2
    assert router.get_host_for_key("baz") == 0


def test_key_extraction():
    # key_spec is (first, last, step), 1-indexed; last == -1 means "to end"
    assert extract_keys(["foo"], (1, 1, 1))
    assert extract_keys(["foo", "value", "foo2", "value2"], (1, -1, 2)) == [
        "foo",
        "foo2",
    ]
    assert extract_keys(["extra", "foo", "value", "foo2", "value2"], (2, -1, 2)) == [
        "foo",
        "foo2",
    ]
    assert extract_keys(["foo", "foo2"], (1, -1, 1)) == ["foo", "foo2"]
================================================
FILE: tests/test_utils.py
================================================
import pytest
from rb.utils import bytes_type, crc32
def test_crc32():
    """
    Test that we get consistent values from python 2/3
    """
    # -662733300 is the signed (python2-style) crc32 of b"test"
    assert crc32("test".encode("utf-8")) == -662733300
gitextract_cxrwzrwv/
├── .craft.yml
├── .github/
│ └── workflows/
│ ├── build.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .python-version
├── CHANGES
├── LICENSE
├── Makefile
├── README.md
├── docs/
│ ├── Makefile
│ ├── _themes/
│ │ └── rb_theme/
│ │ ├── layout.html
│ │ ├── static/
│ │ │ └── rb.css_t
│ │ └── theme.conf
│ ├── conf.py
│ ├── index.rst
│ └── make.bat
├── hooks/
│ └── pre-commit
├── rb/
│ ├── __init__.py
│ ├── _rediscommands.py
│ ├── clients.py
│ ├── cluster.py
│ ├── ketama.py
│ ├── poll.py
│ ├── promise.py
│ ├── router.py
│ ├── testing.py
│ └── utils.py
├── scripts/
│ └── bump-version.sh
├── setup.cfg
├── setup.py
└── tests/
├── conftest.py
├── test_cluster.py
├── test_ketama.py
├── test_poll.py
├── test_promise.py
├── test_router.py
└── test_utils.py
SYMBOL INDEX (198 symbols across 15 files)
FILE: rb/clients.py
function assert_open (line 27) | def assert_open(client):
function merge_batch (line 32) | def merge_batch(command_name, arg_promise_tuples):
function auto_batch_commands (line 57) | def auto_batch_commands(commands):
class CommandBuffer (line 84) | class CommandBuffer(object):
method __init__ (line 87) | def __init__(self, host_id, connect, auto_batch=True):
method closed (line 100) | def closed(self):
method connect (line 104) | def connect(self):
method reconnect (line 112) | def reconnect(self):
method fileno (line 124) | def fileno(self):
method enqueue_command (line 131) | def enqueue_command(self, command_name, args, options):
method has_pending_requests (line 139) | def has_pending_requests(self):
method send_buffer (line 145) | def send_buffer(self):
method send_pending_requests (line 183) | def send_pending_requests(self):
method wait_for_responses (line 212) | def wait_for_responses(self, client):
class RoutingPool (line 233) | class RoutingPool(object):
method __init__ (line 239) | def __init__(self, cluster):
method get_connection (line 242) | def get_connection(self, command_name, shard_hint=None):
method release (line 263) | def release(self, connection):
method disconnect (line 276) | def disconnect(self):
method reset (line 279) | def reset(self):
class BaseClient (line 283) | class BaseClient(StrictRedis):
class RoutingBaseClient (line 287) | class RoutingBaseClient(BaseClient):
method __init__ (line 288) | def __init__(self, connection_pool, auto_batch=True):
method pubsub (line 292) | def pubsub(self, **kwargs):
method pipeline (line 295) | def pipeline(self, transaction=True, shard_hint=None):
method lock (line 300) | def lock(self, *args, **kwargs):
class MappingClient (line 304) | class MappingClient(RoutingBaseClient):
method __init__ (line 311) | def __init__(self, connection_pool, max_concurrency=None, auto_batch=T...
method mget (line 323) | def mget(self, keys, *args):
method mset (line 327) | def mset(self, *args, **kwargs):
method execute_command (line 334) | def execute_command(self, *args, **options):
method _get_command_buffer (line 342) | def _get_command_buffer(self, host_id, command_name):
method _release_command_buffer (line 359) | def _release_command_buffer(self, command_buffer):
method _send_or_reconnect (line 368) | def _send_or_reconnect(self, command_buffer):
method _try_reconnect (line 374) | def _try_reconnect(self, command_buffer, err=None):
method join (line 389) | def join(self, timeout=None):
method cancel (line 425) | def cancel(self):
class FanoutClient (line 431) | class FanoutClient(MappingClient):
method __init__ (line 441) | def __init__(self, hosts, connection_pool, max_concurrency=None, auto_...
method target (line 449) | def target(self, hosts):
method target_key (line 464) | def target_key(self, key):
method execute_command (line 478) | def execute_command(self, *args, **options):
class RoutingClient (line 497) | class RoutingClient(RoutingBaseClient):
method __init__ (line 503) | def __init__(self, cluster, auto_batch=True):
method execute_command (line 510) | def execute_command(self, *args, **options):
method get_mapping_client (line 531) | def get_mapping_client(self, max_concurrency=64, auto_batch=None):
method get_fanout_client (line 548) | def get_fanout_client(self, hosts, max_concurrency=64, auto_batch=None):
method map (line 562) | def map(self, timeout=None, max_concurrency=64, auto_batch=None):
method fanout (line 581) | def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_ba...
class LocalClient (line 607) | class LocalClient(BaseClient):
method __init__ (line 612) | def __init__(self, connection_pool=None, **kwargs):
class MapManager (line 618) | class MapManager(object):
method __init__ (line 621) | def __init__(self, mapping_client, timeout):
method __enter__ (line 626) | def __enter__(self):
method __exit__ (line 630) | def __exit__(self, exc_type, exc_value, tb):
FILE: rb/cluster.py
class HostInfo (line 22) | class HostInfo(object):
method __init__ (line 23) | def __init__(
method __eq__ (line 43) | def __eq__(self, other):
method __ne__ (line 48) | def __ne__(self, other):
method __hash__ (line 54) | def __hash__(self):
method __repr__ (line 57) | def __repr__(self):
function _iter_hosts (line 64) | def _iter_hosts(iterable):
class Cluster (line 77) | class Cluster(object):
method __init__ (line 103) | def __init__(
method add_host (line 132) | def add_host(
method remove_host (line 168) | def remove_host(self, host_id):
method disconnect_pools (line 180) | def disconnect_pools(self):
method get_router (line 187) | def get_router(self):
method get_pool_for_host (line 208) | def get_pool_for_host(self, host_id):
method get_local_client (line 258) | def get_local_client(self, host_id):
method get_local_client_for_key (line 265) | def get_local_client_for_key(self, key):
method get_routing_client (line 271) | def get_routing_client(self, auto_batch=True):
method map (line 289) | def map(self, timeout=None, max_concurrency=64, auto_batch=True):
method fanout (line 309) | def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_ba...
method all (line 323) | def all(self, timeout=None, max_concurrency=64, auto_batch=True):
method execute_commands (line 338) | def execute_commands(self, mapping, *args, **kwargs):
FILE: rb/ketama.py
function md5_bytes (line 9) | def md5_bytes(key):
class Ketama (line 20) | class Ketama(object):
method __init__ (line 24) | def __init__(self, nodes=None, weights=None):
method _rebuild_circle (line 30) | def _rebuild_circle(self):
method _get_node_pos (line 58) | def _get_node_pos(self, key):
method remove_node (line 73) | def remove_node(self, node):
method add_node (line 82) | def add_node(self, node, weight=1):
method get_node (line 88) | def get_node(self, key):
FILE: rb/poll.py
class BasePoller (line 7) | class BasePoller(object):
method __init__ (line 10) | def __init__(self):
method register (line 13) | def register(self, key, f):
method unregister (line 16) | def unregister(self, key):
method poll (line 19) | def poll(self, timeout=None):
method get (line 22) | def get(self, key):
method __len__ (line 25) | def __len__(self):
method __iter__ (line 28) | def __iter__(self):
class SelectPoller (line 34) | class SelectPoller(BasePoller):
method poll (line 37) | def poll(self, timeout=None):
class PollPoller (line 45) | class PollPoller(BasePoller):
method __init__ (line 48) | def __init__(self):
method register (line 53) | def register(self, key, f):
method unregister (line 60) | def unregister(self, key):
method poll (line 67) | def poll(self, timeout=None):
class KQueuePoller (line 80) | class KQueuePoller(BasePoller):
method __init__ (line 83) | def __init__(self):
method register (line 89) | def register(self, key, f):
method unregister (line 105) | def unregister(self, key):
method poll (line 113) | def poll(self, timeout=None):
class EpollPoller (line 132) | class EpollPoller(BasePoller):
method __init__ (line 135) | def __init__(self):
method register (line 140) | def register(self, key, f):
method unregister (line 147) | def unregister(self, key):
method poll (line 154) | def poll(self, timeout=None):
function _is_closed_select (line 169) | def _is_closed_select(f):
function _is_closed_poll (line 178) | def _is_closed_poll(f):
function _is_closed_kqueue (line 187) | def _is_closed_kqueue(f):
function is_closed (line 200) | def is_closed(f):
FILE: rb/promise.py
class Promise (line 6) | class Promise(object):
method __init__ (line 15) | def __init__(self):
method resolved (line 25) | def resolved(value):
method rejected (line 33) | def rejected(reason):
method all (line 41) | def all(iterable_or_dict):
method resolve (line 49) | def resolve(self, value):
method reject (line 68) | def reject(self, reason):
method is_pending (line 81) | def is_pending(self):
method is_resolved (line 86) | def is_resolved(self):
method is_rejected (line 91) | def is_rejected(self):
method done (line 95) | def done(self, on_success=None, on_failure=None):
method then (line 109) | def then(self, success=None, failure=None):
method __repr__ (line 130) | def __repr__(self):
function _ensure_promise (line 140) | def _ensure_promise(value):
function _promise_from_iterable (line 144) | def _promise_from_iterable(iterable):
function _promise_from_dict (line 163) | def _promise_from_dict(d):
FILE: rb/router.py
class UnroutableCommand (line 8) | class UnroutableCommand(Exception):
class BadHostSetup (line 14) | class BadHostSetup(Exception):
function extract_keys (line 20) | def extract_keys(args, key_spec):
function assert_gapless_hosts (line 32) | def assert_gapless_hosts(hosts):
class BaseRouter (line 42) | class BaseRouter(object):
method __init__ (line 47) | def __init__(self, cluster):
method cluster (line 54) | def cluster(self):
method get_key (line 61) | def get_key(self, command, args):
method get_host_for_command (line 94) | def get_host_for_command(self, command, args):
method get_host_for_key (line 98) | def get_host_for_key(self, key):
class ConsistentHashingRouter (line 106) | class ConsistentHashingRouter(BaseRouter):
method __init__ (line 115) | def __init__(self, cluster):
method get_host_for_key (line 121) | def get_host_for_key(self, key):
class PartitionRouter (line 128) | class PartitionRouter(BaseRouter):
method __init__ (line 136) | def __init__(self, cluster):
method get_host_for_key (line 140) | def get_host_for_key(self, key):
FILE: rb/testing.py
class Server (line 17) | class Server(object):
method __init__ (line 18) | def __init__(self, cl, socket_path):
method test_connection (line 22) | def test_connection(self):
method signal_stop (line 30) | def signal_stop(self):
method close (line 34) | def close(self):
class TestSetup (line 45) | class TestSetup(object):
method __init__ (line 51) | def __init__(self, servers=4, databases_each=8, server_executable="red...
method __enter__ (line 60) | def __enter__(self):
method __exit__ (line 63) | def __exit__(self, exc_type, exc_value, tb):
method make_cluster (line 66) | def make_cluster(self):
method spawn_server (line 87) | def spawn_server(self):
method wait_for_servers (line 106) | def wait_for_servers(self, timeout=10):
method close (line 122) | def close(self):
method __del__ (line 133) | def __del__(self):
function make_test_cluster (line 141) | def make_test_cluster(*args, **kwargs):
FILE: rb/utils.py
function iteritems (line 12) | def iteritems(d, **kw):
function itervalues (line 15) | def itervalues(d, **kw):
function iteritems (line 28) | def iteritems(d, **kw):
function itervalues (line 31) | def itervalues(d, **kw):
function crc32 (line 51) | def crc32(*args):
FILE: tests/conftest.py
function cluster (line 7) | def cluster(request):
FILE: tests/test_cluster.py
function test_basic_interface (line 18) | def test_basic_interface():
function test_router_access (line 46) | def test_router_access():
function test_basic_cluster (line 60) | def test_basic_cluster(cluster):
function test_basic_cluster_disabled_batch (line 74) | def test_basic_cluster_disabled_batch(cluster):
function make_zset_data (line 88) | def make_zset_data(x):
function test_simple_api (line 92) | def test_simple_api(cluster):
function test_routing_client_releases_connection_on_error (line 127) | def test_routing_client_releases_connection_on_error(cluster):
function test_mapping_client_releases_connection_on_error (line 137) | def test_mapping_client_releases_connection_on_error(cluster):
function test_managed_mapping_client_releases_connection_on_error (line 148) | def test_managed_mapping_client_releases_connection_on_error(cluster):
function test_multi_keys_rejected (line 158) | def test_multi_keys_rejected(cluster):
function test_promise_api (line 171) | def test_promise_api(cluster):
function test_fanout_api (line 181) | def test_fanout_api(cluster):
function test_fanout_key_target (line 199) | def test_fanout_key_target(cluster):
function test_fanout_targeting_api (line 210) | def test_fanout_targeting_api(cluster):
function test_emulated_batch_apis (line 222) | def test_emulated_batch_apis(cluster):
function test_batch_promise_all (line 231) | def test_batch_promise_all(cluster):
function test_execute_commands (line 246) | def test_execute_commands(cluster):
function test_reconnect (line 282) | def test_reconnect(cluster):
FILE: tests/test_ketama.py
function test_basic (line 4) | def test_basic():
FILE: tests/test_poll.py
function test_simple_api (line 11) | def test_simple_api(cluster, poll, monkeypatch):
FILE: tests/test_promise.py
function test_resolved_promise (line 4) | def test_resolved_promise():
function test_rejected_promise (line 12) | def test_rejected_promise():
function test_success_callbacks (line 21) | def test_success_callbacks():
function test_failure_callbacks (line 38) | def test_failure_callbacks():
function test_promise_then (line 55) | def test_promise_then():
function test_promise_all (line 65) | def test_promise_all():
function test_auto_coercion (line 95) | def test_auto_coercion():
FILE: tests/test_router.py
function test_router_key_routing (line 7) | def test_router_key_routing():
function test_host_validation (line 21) | def test_host_validation():
function test_router_basics (line 31) | def test_router_basics():
function test_key_extraction (line 44) | def test_key_extraction():
FILE: tests/test_utils.py
function test_crc32 (line 6) | def test_crc32():
Condensed preview — 38 files, each entry showing the file path, character count, and a content snippet. Download the .json file, or copy the text, to obtain the full structured content (152K chars).
[
{
"path": ".craft.yml",
"chars": 314,
"preview": "minVersion: \"0.18.0\"\ngithub:\n owner: getsentry\n repo: rb\nchangelog: CHANGES\nchangelogPolicy: auto\nstatusProvider:\n na"
},
{
"path": ".github/workflows/build.yml",
"chars": 538,
"preview": "name: build\n\non:\n push:\n branches:\n - master\n - release/**\n\njobs:\n dist:\n name: Wheels\n runs-on: ub"
},
{
"path": ".github/workflows/release.yml",
"chars": 1107,
"preview": "name: release\n\non:\n workflow_dispatch:\n inputs:\n version:\n description: Version to release\n requi"
},
{
"path": ".github/workflows/test.yml",
"chars": 1524,
"preview": "name: test\n\non:\n push:\n branches:\n - master\n - release/**\n pull_request:\n\njobs:\n test:\n name: Run tes"
},
{
"path": ".gitignore",
"chars": 64,
"preview": "docs/_build\n*.pyc\n*.pyo\n.DS_Store\n.cache/\nbuild\ndist\n*.egg-info\n"
},
{
"path": ".python-version",
"chars": 4,
"preview": "3.8\n"
},
{
"path": "CHANGES",
"chars": 2423,
"preview": "Rb Changelog\n============\n\n1.10.0\n------\n\n### Various fixes & improvements\n\n- add internal pypi deploy to `rb` (#54) by "
},
{
"path": "LICENSE",
"chars": 10875,
"preview": "\n Apache License\n Version 2.0, January 2004\n htt"
},
{
"path": "Makefile",
"chars": 141,
"preview": "setup-git:\n\t@echo \"--> Installing git hooks\"\n\t@pip install flake8\n\t@cd .git/hooks && ln -sf ../../hooks/* ./\n\ntest:\n\t@py"
},
{
"path": "README.md",
"chars": 1084,
"preview": "# rb [](https://github.com/getsentry/rb/act"
},
{
"path": "docs/Makefile",
"chars": 4569,
"preview": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS =\nSPHINXBUILD "
},
{
"path": "docs/_themes/rb_theme/layout.html",
"chars": 683,
"preview": "{% extends \"basic/layout.html\" %}\n{% block header %}\n {{ super() }}\n {% if pagename == 'index' %}\n <div class=indexwr"
},
{
"path": "docs/_themes/rb_theme/static/rb.css_t",
"chars": 4442,
"preview": "@import url(\"basic.css\");\n@import url(http://fonts.googleapis.com/css?family=Roboto+Mono:400,700italic,700,400italic);\n "
},
{
"path": "docs/_themes/rb_theme/theme.conf",
"chars": 102,
"preview": "[theme]\ninherit = basic\nstylesheet = rb.css\nnosidebar = true\n\n[options]\nindex_logo = ''\ngithub_fork =\n"
},
{
"path": "docs/conf.py",
"chars": 6946,
"preview": "# -*- coding: utf-8 -*-\n#\n# rb documentation build configuration file, created by\n# sphinx-quickstart on Mon Apr 26 19:5"
},
{
"path": "docs/index.rst",
"chars": 4675,
"preview": "rb: the redis blaster\n=====================\n\n.. module:: rb\n\nRb, the redis blaster, is a library that implements non-rep"
},
{
"path": "docs/make.bat",
"chars": 4106,
"preview": "@ECHO OFF\n\nREM Command file for Sphinx documentation\n\nif \"%SPHINXBUILD%\" == \"\" (\n\tset SPHINXBUILD=sphinx-build\n)\nset BUI"
},
{
"path": "hooks/pre-commit",
"chars": 1238,
"preview": "#!/usr/bin/env python\n\nimport glob\nimport os\nimport sys\n\nos.environ['PYFLAKES_NODOCTEST'] = '1'\n\n# pep8.py uses sys.argv"
},
{
"path": "rb/__init__.py",
"chars": 687,
"preview": "\"\"\"\n rb\n ~~\n\n The redis blaster.\n\n :copyright: (c) 2015 Functional Software Inc.\n :license: Apache Licens"
},
{
"path": "rb/_rediscommands.py",
"chars": 16051,
"preview": "# flake8: noqa\n\nCOMMANDS = {\n \"APPEND\": {\"arity\": 3, \"flags\": [\"write\", \"denyoom\"], \"key_spec\": (1, 1, 1)},\n \"AUTH"
},
{
"path": "rb/clients.py",
"chars": 22416,
"preview": "import time\nimport errno\nimport socket\n\nfrom weakref import ref as weakref\n\nfrom redis import StrictRedis\nfrom redis.cli"
},
{
"path": "rb/cluster.py",
"chars": 16574,
"preview": "from redis.connection import ConnectionPool, UnixDomainSocketConnection\n\ntry:\n from redis.commands.core import Script"
},
{
"path": "rb/ketama.py",
"chars": 2603,
"preview": "import hashlib\nimport math\n\nfrom bisect import bisect\n\nfrom rb.utils import text_type, integer_types, bytes_type\n\n\ndef m"
},
{
"path": "rb/poll.py",
"chars": 6096,
"preview": "import fcntl\nimport array\nimport select\nimport termios\n\n\nclass BasePoller(object):\n is_available = False\n\n def __i"
},
{
"path": "rb/promise.py",
"chars": 5350,
"preview": "from functools import partial\n\nfrom rb.utils import iteritems\n\n\nclass Promise(object):\n \"\"\"A promise object that atte"
},
{
"path": "rb/router.py",
"chars": 4699,
"preview": "from weakref import ref as weakref\n\nfrom rb.ketama import Ketama\nfrom rb.utils import text_type, bytes_type, integer_typ"
},
{
"path": "rb/testing.py",
"chars": 4454,
"preview": "import os\nimport time\nimport uuid\nimport shutil\nimport socket\nimport tempfile\n\nfrom contextlib import contextmanager\nfro"
},
{
"path": "rb/utils.py",
"chars": 1449,
"preview": "from __future__ import absolute_import\n\nimport sys\n\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n integer_types = (int, lo"
},
{
"path": "scripts/bump-version.sh",
"chars": 265,
"preview": "#!/bin/bash\nset -eu\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\ncd $SCRIPT_DIR/..\n\nOLD_VERSION=\"$1\"\n"
},
{
"path": "setup.cfg",
"chars": 28,
"preview": "[bdist_wheel]\nuniversal = 1\n"
},
{
"path": "setup.py",
"chars": 989,
"preview": "import re\nimport ast\nimport os\nfrom setuptools import setup\n\n\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n\n\nwith"
},
{
"path": "tests/conftest.py",
"chars": 261,
"preview": "import pytest\n\nfrom rb.testing import make_test_cluster\n\n\n@pytest.fixture\ndef cluster(request):\n mgr = make_test_clus"
},
{
"path": "tests/test_cluster.py",
"chars": 9170,
"preview": "import time\nimport pytest\n\nimport redis\nfrom redis.exceptions import ResponseError\n\nfrom rb.cluster import Cluster\nfrom "
},
{
"path": "tests/test_ketama.py",
"chars": 1217,
"preview": "from rb.ketama import Ketama\n\n\ndef test_basic():\n def test(k):\n data = {}\n for i in range(1000):\n "
},
{
"path": "tests/test_poll.py",
"chars": 541,
"preview": "import pytest\n\nfrom rb import clients\nfrom rb.poll import available_pollers\nfrom rb.utils import text_type\n\n\n@pytest.mar"
},
{
"path": "tests/test_promise.py",
"chars": 2141,
"preview": "from rb.promise import Promise\n\n\ndef test_resolved_promise():\n p = Promise.resolved(42)\n assert p.is_resolved\n "
},
{
"path": "tests/test_router.py",
"chars": 1589,
"preview": "import pytest\n\nfrom rb.cluster import Cluster\nfrom rb.router import UnroutableCommand, extract_keys, BadHostSetup\n\n\ndef "
},
{
"path": "tests/test_utils.py",
"chars": 200,
"preview": "import pytest\n\nfrom rb.utils import bytes_type, crc32\n\n\ndef test_crc32():\n \"\"\"\n Test that we get consistent values"
}
]
About this extraction
This page contains the full source code of the getsentry/rb GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 38 files (138.3 KB), approximately 37.3k tokens, and a symbol index with 198 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.