[
  {
    "path": ".editorconfig",
    "content": "# See http://editorconfig.org for format details and\n# http://editorconfig.org/#download for editor / IDE integration\n\nroot = true\n\n[*]\nindent_style = space\nindent_size = 4\ninsert_final_newline = true\ntrim_trailing_whitespace = true\nend_of_line = lf\ncharset = utf-8\n\n# Makefiles always use tabs for indentation\n[Makefile]\nindent_style = tab\n\n# We don't want to apply our defaults to third-party code or minified bundles:\n[**/{external,vendor}/**,**.min.{js,css}]\nindent_style = ignore\nindent_size = ignore\n"
  },
  {
    "path": ".gitchangelog.rc",
    "content": "##\n## Format\n##\n##   ACTION: [AUDIENCE:] COMMIT_MSG [!TAG ...]\n##\n## Description\n##\n##   ACTION is one of 'chg', 'fix', 'new'\n##\n##       Is WHAT the change is about.\n##\n##       'chg' is for refactor, small improvement, cosmetic changes...\n##       'fix' is for bug fixes\n##       'new' is for new features, big improvement\n##\n##   AUDIENCE is optional and one of 'dev', 'usr', 'pkg', 'test', 'doc'\n##\n##       Is WHO is concerned by the change.\n##\n##       'dev'  is for developpers (API changes, refactors...)\n##       'usr'  is for final users (UI changes)\n##       'pkg'  is for packagers   (packaging changes)\n##       'test' is for testers     (test only related changes)\n##       'doc'  is for doc guys    (doc only changes)\n##\n##   COMMIT_MSG is ... well ... the commit message itself.\n##\n##   TAGs are additionnal adjective as 'refactor' 'minor' 'cosmetic'\n##\n##       They are preceded with a '!' or a '@' (prefer the former, as the\n##       latter is wrongly interpreted in github.) Commonly used tags are:\n##\n##       'refactor' is obviously for refactoring code only\n##       'minor' is for a very meaningless change (a typo, adding a comment)\n##       'cosmetic' is for cosmetic driven change (re-indentation, 80-col...)\n##       'wip' is for partial functionality but complete subfunctionality.\n##\n## Example:\n##\n##   new: usr: support of bazaar implemented\n##   chg: re-indentend some lines !cosmetic\n##   new: dev: updated code to be compatible with last version of killer lib.\n##   fix: pkg: updated year of licence coverage.\n##   new: test: added a bunch of test around user usability of feature X.\n##   fix: typo in spelling my name in comment. !minor\n##\n##   Please note that multi-line commit message are supported, and only the\n##   first line will be considered as the \"summary\" of the commit message. So\n##   tags, and other rules only applies to the summary.  
The body of the commit\n##   message will be displayed in the changelog without reformatting.\n\n\n##\n## ``ignore_regexps`` is a line of regexps\n##\n## Any commit having its full commit message matching any regexp listed here\n## will be ignored and won't be reported in the changelog.\n##\nignore_regexps = [\n        r'@minor', r'!minor',\n        r'@cosmetic', r'!cosmetic',\n        r'@refactor', r'!refactor',\n        r'@wip', r'!wip',\n        r'^([cC]hg|[fF]ix|[nN]ew)\\s*:\\s*[p|P]kg:',\n        r'^([cC]hg|[fF]ix|[nN]ew)\\s*:\\s*[d|D]ev:',\n        r'^(.{3,3}\\s*:)?\\s*[fF]irst commit.?\\s*$',\n  ]\n\n\n## ``section_regexps`` is a list of 2-tuples associating a string label and a\n## list of regexp\n##\n## Commit messages will be classified in sections thanks to this. Section\n## titles are the label, and a commit is classified under this section if any\n## of the regexps associated is matching.\n##\nsection_regexps = [\n    ('New', [\n\tr'^[nN]ew\\s*:\\s*((dev|use?r|pkg|test|doc)\\s*:\\s*)?([^\\n]*)$',\n     ]),\n    ('Changes', [\n        r'^[cC]hg\\s*:\\s*((dev|use?r|pkg|test|doc)\\s*:\\s*)?([^\\n]*)$',\n     ]),\n    ('Fix', [\n        r'^[fF]ix\\s*:\\s*((dev|use?r|pkg|test|doc)\\s*:\\s*)?([^\\n]*)$',\n     ]),\n\n    ('Other', None ## Match all lines\n     ),\n\n]\n\n\n## ``body_process`` is a callable\n##\n## This callable will be given the original body and result will\n## be used in the changelog.\n##\n## Available constructs are:\n##\n##   - any python callable that take one txt argument and return txt argument.\n##\n##   - ReSub(pattern, replacement): will apply regexp substitution.\n##\n##   - Indent(chars=\"  \"): will indent the text with the prefix\n##     Please remember that template engines gets also to modify the text and\n##     will usually indent themselves the text if needed.\n##\n##   - Wrap(regexp=r\"\\n\\n\"): re-wrap text in separate paragraph to fill 80-Columns\n##\n##   - noop: do nothing\n##\n##   - ucfirst: ensure the first 
letter is uppercase.\n##     (usually used in the ``subject_process`` pipeline)\n##\n##   - final_dot: ensure text finishes with a dot\n##     (usually used in the ``subject_process`` pipeline)\n##\n##   - strip: remove any spaces before or after the content of the string\n##\n## Additionally, you can `pipe` the provided filters, for instance:\n#body_process = Wrap(regexp=r'\\n(?=\\w+\\s*:)') | Indent(chars=\"  \")\n#body_process = Wrap(regexp=r'\\n(?=\\w+\\s*:)')\n#body_process = noop\nbody_process = ReSub(r'((^|\\n)[A-Z]\\w+(-\\w+)*: .*(\\n\\s+.*)*)+$', r'') | strip\n\n\n## ``subject_process`` is a callable\n##\n## This callable will be given the original subject and result will\n## be used in the changelog.\n##\n## Available constructs are those listed in ``body_process`` doc.\nsubject_process = (strip |\n    ReSub(r'^([cC]hg|[fF]ix|[nN]ew)\\s*:\\s*((dev|use?r|pkg|test|doc)\\s*:\\s*)?([^\\n@]*)(@[a-z]+\\s+)*$', r'\\4') |\n    ucfirst | final_dot)\n\n\n## ``tag_filter_regexp`` is a regexp\n##\n## Tags that will be used for the changelog must match this regexp.\n##\ntag_filter_regexp = r'^v[0-9]+\\.[0-9]+(\\.[0-9]+)?$'\n\n\n## ``unreleased_version_label`` is a string\n##\n## This label will be used as the changelog Title of the last set of changes\n## between last valid tag and HEAD if any.\nunreleased_version_label = \"%%version%% (unreleased)\"\n\n\n## ``output_engine`` is a callable\n##\n## This will change the output format of the generated changelog file\n##\n## Available choices are:\n##\n##   - rest_py\n##\n##        Legacy pure python engine, outputs ReSTructured text.\n##        This is the default.\n##\n##   - mustache(<template_name>)\n##\n##        Template name could be any of the available templates in\n##        ``templates/mustache/*.tpl``.\n##        Requires python package ``pystache``.\n##        Examples:\n##           - mustache(\"markdown\")\n##           - mustache(\"restructuredtext\")\n##\n##   - makotemplate(<template_name>)\n##\n##       
 Template name could be any of the available templates in\n##        ``templates/mako/*.tpl``.\n##        Requires python package ``mako``.\n##        Examples:\n##           - makotemplate(\"restructuredtext\")\n##\noutput_engine = rest_py\n#output_engine = mustache(\"restructuredtext\")\n#output_engine = mustache(\"markdown\")\n#output_engine = makotemplate(\"restructuredtext\")\n\n\n## ``include_merge`` is a boolean\n##\n## This option tells git-log whether to include merge commits in the log.\n## The default is to include them.\ninclude_merge = True\n"
  },
  {
    "path": ".github/issue_template.md",
    "content": "* [ ] Tested with the latest Haystack release\n* [ ] Tested with the current Haystack master branch\n\n## Expected behaviour\n\n## Actual behaviour\n\n## Steps to reproduce the behaviour\n\n1.\n\n## Configuration\n\n* Operating system version:\n* Search engine version:\n* Python version:\n* Django version:\n* Haystack version:"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "# Hey, thanks for contributing to Haystack. Please review [the contributor guidelines](https://django-haystack.readthedocs.io/en/latest/contributing.html) and confirm that [the tests pass](https://django-haystack.readthedocs.io/en/latest/running_tests.html) with at least one search engine.\n\n# Once your pull request has been submitted, the full test suite will be executed on https://github.com/django-haystack/django-haystack/actions/workflows/test.yml. Pull requests with passing tests are far more likely to be reviewed and merged."
  },
  {
    "path": ".github/workflows/black+isort.yml",
    "content": "name: black+isort\n\non: [pull_request, push]\n\njobs:\n  check:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v2\n    - name: Set up Python\n      uses: actions/setup-python@v2\n      with:\n        python-version: 3.9\n    - name: Install tools\n      run: pip install black isort\n    - name: Run black+isort\n      run: |\n        black --check --diff .\n        isort --check .\n"
  },
  {
    "path": ".github/workflows/codeql-analysis.yml",
    "content": "name: \"CodeQL\"\n\non:\n  push:\n    branches: [master, ]\n  pull_request:\n    # The branches below must be a subset of the branches above\n    branches: [master]\n  schedule:\n    - cron: '0 6 * * 5'\n\njobs:\n  analyze:\n    name: Analyze\n    runs-on: ubuntu-latest\n\n    steps:\n    - name: Checkout repository\n      uses: actions/checkout@v2\n\n    # Initializes the CodeQL tools for scanning.\n    - name: Initialize CodeQL\n      uses: github/codeql-action/init@v1\n      with:\n        languages: python\n\n    - name: Perform CodeQL Analysis\n      uses: github/codeql-action/analyze@v1\n"
  },
  {
    "path": ".github/workflows/docs.yml",
    "content": "name: Build docs\n\non: [pull_request, push]\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v2\n    - name: Set up Python\n      uses: actions/setup-python@v2\n      with:\n        python-version: 3.9\n    - name: Install dependencies\n      run: pip install sphinx\n    - name: Build docs\n      run: cd docs && make html\n"
  },
  {
    "path": ".github/workflows/flake8.yml",
    "content": "name: flake8\n\non: [pull_request, push]\n\njobs:\n  check:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v2\n    - name: Set up Python\n      uses: actions/setup-python@v2\n      with:\n        python-version: 3.9\n    - name: Install tools\n      run: pip install flake8 flake8-assertive flake8-bugbear flake8-builtins flake8-comprehensions flake8-logging-format\n    - name: Run flake8\n      run: flake8 example_project haystack\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: Test\n\non: [pull_request, push]\n\njobs:\n  test:\n\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        django-version: [2.2, 3.1, 3.2]\n        python-version: [3.6, 3.7, 3.8, 3.9]\n        elastic-version: [1.7, 2.4, 5.5]\n        include:\n          - django-version: 2.2\n            python-version: 3.5\n            elastic-version: 1.7\n          - django-version: 2.2\n            python-version: 3.5\n            elastic-version: 2.4\n          - django-version: 2.2\n            python-version: 3.5\n            elastic-version: 5.5\n    services:\n      elastic:\n        image: elasticsearch:${{ matrix.elastic-version }}\n        ports:\n          - 9200:9200\n      solr:\n        image: solr:6\n        ports:\n          - 9001:9001\n    steps:\n    - uses: actions/checkout@v2\n    - name: Set up Python ${{ matrix.python-version }}\n      uses: actions/setup-python@v2\n      with:\n        python-version: ${{ matrix.python-version }}\n    - name: Install system dependencies\n      run: sudo apt install --no-install-recommends -y gdal-bin\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip setuptools wheel\n        pip install coverage requests\n        pip install django==${{ matrix.django-version }} elasticsearch==${{ matrix.elastic-version }}\n        python setup.py clean build install\n    - name: Run test\n      run: coverage run setup.py test\n\n"
  },
  {
    "path": ".gitignore",
    "content": ".settings\n*.pyc\n.DS_Store\n_build\n.*.sw[po]\n*.egg-info\ndist\nbuild\nMANIFEST\n.tox\nenv\nenv3\n*.egg\n.eggs\n.coverage\n.idea\n\n# Build artifacts from test setup\n*.tgz\ntest_haystack/solr_tests/server/solr4/\n"
  },
  {
    "path": "AUTHORS",
    "content": "Primary Authors:\n\n    * Daniel Lindsley\n    * Matt Croydon (some documentation, sanity checks and the sweet name)\n    * Travis Cline (the original SQ implementation, improvements to ModelSearchIndex)\n    * David Sauve (notanumber) for the Xapian backend, the simple backend and various patches.\n    * Jannis Leidel (jezdez)\n    * Chris Adams (acdha)\n    * Justin Caratzas (bigjust)\n    * Andrew Schoen (andrewschoen)\n    * Dan Watson (dcwatson)\n    * Matt Woodward (mpwoodward)\n    * Alex Vidal (avidal)\n    * Zach Smith (zmsmith)\n    * Stefan Wehrmeyer (stefanw)\n    * George Hickman (ghickman)\n    * Ben Spaulding (benspaulding)\n\n\nThanks to\n    * Jacob Kaplan-Moss & Joseph Kocherhans for the original implementation of\n      djangosearch, of which portions were used, as well as basic API feedback.\n    * Christian Metts for designing the logo and building a better site.\n    * Nathan Borror for testing and advanced form usage.\n    * Malcolm Tredinnick for API feedback.\n    * Mediaphormedia for funding the development on More Like This and faceting.\n    * Travis Cline for API feedback, Git help and improvements to the reindex command.\n    * Brian Rosner for various patches.\n    * Richard Boulton for feedback and suggestions.\n    * Cyberdelia for feedback and patches.\n    * Ask Solem for for patching the setup.py.\n    * Ben Spaulding for feedback and documentation patches.\n    * smulloni for various patches.\n    * JoeGermuska for various patches.\n    * SmileyChris for various patches.\n    * sk1p for various patches.\n    * Ryszard Szopa (ryszard) for various patches.\n    * Patryk Zawadzki (patrys) for various patches and feedback.\n    * Frank Wiles for documentation patches.\n    * Chris Adams (acdha) for various patches.\n    * Kyle MacFarlane for various patches.\n    * Alex Gaynor (alex) for help with handling deferred models with More Like This.\n    * RobertGawron for a patch to the Highlighter.\n    * Simon Willison 
(simonw) for various proposals and patches.\n    * Ben Firshman (bfirsh) for faceting improvements and suggestions.\n    * Peter Bengtsson for a patch regarding passing a customized site.\n    * Sam Bull (osirius) for a patch regarding initial data on SearchForms.\n    * slai for a patch regarding Whoosh and fetching all documents of a certain model type.\n    * alanwj for a patch regarding Whoosh and empty MultiValueFields.\n    * alanzoppa for a patch regarding highlighting.\n    * piquadrat for a patch regarding the more_like_this template tag.\n    * dedsm for a patch regarding the pickling of SearchResult objects.\n    * EmilStenstrom for a patch to the Highlighter.\n    * symroe for a patch regarding the more_like_this template tag.\n    * ghostrocket for a patch regarding the simple backend.\n    * Rob Hudson (robhudson) for improvements to the admin search.\n    * apollo13 for simplifying ``SearchForm.__init__``.\n    * Carl Meyer (carljm) for a patch regarding character primary keys.\n    * oyiptong for a patch regarding pickling.\n    * alfredo for a patch to generate epub docs.\n    * Luke Hatcher (lukeman) for documentation patches.\n    * Trey Hunner (treyhunner) for a Whoosh field boosting patch.\n    * Kent Gormat of Retail Catalyst for funding the development of multiple index support.\n    * Gidsy for funding the initial geospatial implementation\n    * CMGdigital for funding the development on:\n        * a multiprocessing-enabled version of ``update_index``.\n        * the addition of ``--start/--end`` options in ``update_index``.\n        * the ability to specify both apps & models to ``update_index``.\n        * A significant portion of the geospatial feature.\n        * A significant portion of the input types feature.\n    * Aram Dulyan (Aramgutang) for fixing the included admin class to be Django 1.4 compatible.\n    * Honza Kral (HonzaKral) for various Elasticsearch tweaks & testing.\n    * Alex Vidal (avidal) for a patch allowing 
developers to override the queryset used for update operations.\n    * Igor Támara (ikks) for a patch related to Unicode ``verbose_name_plural``.\n    * Dan Helfman (witten) for a patch related to highlighting.\n    * Matt DeBoard for refactor of ``SolrSearchBackend.search`` method to allow simpler extension of the class.\n    * Rodrigo Guzman (rz) for a fix to query handling in the ``simple`` backend.\n    * Martin J. Laubach (mjl) for fixing the logic used when combining querysets\n    * Eric Holscher (ericholscher) for a docs fix.\n    * Erik Rose (erikrose) for a quick pyelasticsearch-compatibility patch\n    * Stefan Wehrmeyer (stefanw) for a simple search filter fix\n    * Dan Watson (dcwatson) for various patches.\n    * Andrew Schoen (andrewschoen) for the addition of ``HAYSTACK_IDENTIFIER_METHOD``\n    * Pablo SEMINARIO (pabluk) for a docs fix, and a fix in the ElasticSearch backend.\n    * Eric Thurgood (ethurgood) for a import fix in the Elasticssearch backend.\n    * Revolution Systems & The Python Software Foundation for funding a significant portion of the port to Python 3!\n    * Artem Kostiuk (postatum) for patch allowing to search for slash character in ElasticSearch since Lucene 4.0.\n    * Luis Barrueco (luisbarrueco) for a simple fix regarding updating indexes using multiple backends.\n    * Szymon Teżewski (jasisz) for an update to the bounding-box calculation for spatial queries\n    * Chris Wilson (qris) and Orlando Fiol (overflow) for an update allowing the use of multiple order_by()\n      fields with Whoosh as long as they share a consistent sort direction\n    * Steven Skoczen (@skoczen) for an ElasticSearch bug fix\n    * @Xaroth for updating the app loader to be compatible with Django 1.7\n    * Jaroslav Gorjatsev (jarig) for a bugfix with index_fieldname\n    * Dirk Eschler (@deschler) for app loader Django 1.7 compatibility fixes\n    * Wictor (wicol) for a patch improving the error message given when model_attr references a 
non-existent\n      field\n    * Pierre Dulac (dulaccc) for a patch updating distance filters for ElasticSearch 1.x\n    * Andrei Fokau (andreif) for adding support for ``SQ`` in ``SearchQuerySet.narrow()``\n    * Phill Tornroth (phill-tornroth) for several patches improving UnifiedIndex and ElasticSearch support\n    * Philippe Luickx (philippeluickx) for documenting how to provide backend-specific facet options\n    * Felipe Prenholato (@chronossc) for a patch making it easy to exclude documents from indexing using custom logic\n    * Alfredo Armanini (@phingage) for a patch fixing compatibility with database API changes in Django 1.8\n    * Ben Spaulding (@benspaulding) for many updates for Django 1.8 support\n    * Troy Grosfield (@troygrosfield) for fixing the test runner for Django 1.8\n    * Ilan Steemers (@Koed00) for fixing Django 1.9 deprecation warnings\n    * Ana Carolina (@anacarolinats) and Steve Bussetti (@sbussetti) for adding the ``fuzzy`` operator to\n      SearchQuerySet\n    * Tadas Dailyda (@skirsdeda) for various patches\n    * Craig de Stigter (@craigds) for a patch fixing concurrency issues when building UnifiedIndex\n    * Claude Paroz (@claudep) for Django 1.9 support\n    * Chris Brooke (@chrisbrooke) for patching around a backwards-incompatible change in ElasticSearch 2\n    * Gilad Beeri (@giladbeeri) for adding retries when updating a backend\n    * Arjen Verstoep (@terr) for a patch that allows attribute lookups through Django ManyToManyField relationships\n    * Tim Babych (@tymofij) for enabling backend-specific parameters in ``.highlight()``\n    * Antony Raj (@antonyr) for adding endswith input type and fixing contains input type\n    * Morgan Aubert (@ellmetha) for Django 1.10 support\n    * João Junior (@joaojunior) and Bruno Marques (@ElSaico) for Elasticsearch 2.x support\n    * Alex Tomkins (@tomkins) for various patches\n    * Martin Pauly (@mpauly) for Django 2.0 support\n    * Ryan Jarvis (@cabalist) for some code 
cleanup\n    * Dulmandakh Sukhbaatar (@dulmandakh) for GitHub Actions support, and flake8, black, isort checks.\n    * Deniz Dogan (@denizdogan) for adding support for the ``analyzer`` parameter for the Whoosh backend\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing\n\nHaystack is open-source and, as such, grows (or shrinks) & improves in part\ndue to the community. Below are some guidelines on how to help with the project.\n\n## Philosophy\n\n-   Haystack is BSD-licensed. All contributed code must be either\n    -   the original work of the author, contributed under the BSD, or...\n    -   work taken from another project released under a BSD-compatible license.\n-   GPL'd (or similar) works are not eligible for inclusion.\n-   Haystack's git master branch should always be stable, production-ready &\n    passing all tests.\n-   Major releases (1.x.x) are commitments to backward-compatibility of the public APIs.\n    Any documented API should ideally not change between major releases.\n    The exclusion to this rule is in the event of either a security issue\n    or to accommodate changes in Django itself.\n-   Minor releases (x.3.x) are for the addition of substantial features or major\n    bugfixes.\n-   Patch releases (x.x.4) are for minor features or bugfixes.\n\n## Guidelines For Reporting An Issue/Feature\n\nSo you've found a bug or have a great idea for a feature. Here's the steps you\nshould take to help get it added/fixed in Haystack:\n\n-   First, check to see if there's an existing issue/pull request for the\n    bug/feature. All issues are at https://github.com/toastdriven/django-haystack/issues\n    and pull reqs are at https://github.com/toastdriven/django-haystack/pulls.\n-   If there isn't one there, please file an issue. The ideal report includes:\n    -   A description of the problem/suggestion.\n    -   How to recreate the bug.\n    -   If relevant, including the versions of your:\n        -   Python interpreter\n        -   Django\n        -   Haystack\n        -   Search engine used (as well as bindings)\n        -   Optionally of the other dependencies involved\n-   Ideally, creating a pull request with a (failing) test case demonstrating\n    what's wrong. 
This makes it easy for us to reproduce & fix the problem.\n\n    Github has a great guide for writing an effective pull request:\n    https://github.com/blog/1943-how-to-write-the-perfect-pull-request\n\n    Instructions for running the tests are at\n    https://django-haystack.readthedocs.io/en/latest/running_tests.html\n\nYou might also hop into the IRC channel (`#haystack` on `irc.freenode.net`)\n& raise your question there, as there may be someone who can help you with a\nwork-around.\n\n## Guidelines For Contributing Code\n\nIf you're ready to take the plunge & contribute back some code/docs, the\nprocess should look like:\n\n-   Fork the project on GitHub into your own account.\n-   Clone your copy of Haystack.\n-   Make a new branch in git & commit your changes there.\n-   Push your new branch up to GitHub.\n-   Again, ensure there isn't already an issue or pull request out there on it.\n    If there is & you feel you have a better fix, please take note of the issue\n    number & mention it in your pull request.\n-   Create a new pull request (based on your branch), including what the\n    problem/feature is, versions of your software & referencing any related\n    issues/pull requests.\n\nIn order to be merged into Haystack, contributions must have the following:\n\n-   A solid patch that:\n    -   is clear.\n    -   works across all supported versions of Python/Django.\n    -   follows the existing style of the code base formatted with\n        [`isort`](https://pypi.org/project/isort/) and\n        [`Black`](https://pypi.org/project/black/) using the provided\n        configuration in the repo\n    -   comments included as needed to explain why the code functions as it does\n-   A test case that demonstrates the previous flaw that now passes\n    with the included patch.\n-   If it adds/changes a public API, it must also include documentation\n    for those changes.\n-   Must be appropriately licensed (see [Philosophy](#philosophy)).\n-   Adds yourself to 
the AUTHORS file.\n\nIf your contribution lacks any of these things, they will have to be added\nby a core contributor before being merged into Haystack proper, which may take\nsubstantial time for the all-volunteer team to get to.\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2009-2013, Daniel Lindsley.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n    1. Redistributions of source code must retain the above copyright notice,\n       this list of conditions and the following disclaimer.\n\n    2. Redistributions in binary form must reproduce the above copyright\n       notice, this list of conditions and the following disclaimer in the\n       documentation and/or other materials provided with the distribution.\n\n    3. Neither the name of Haystack nor the names of its contributors may be used\n       to endorse or promote products derived from this software without\n       specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n---\n\nPrior to April 17, 2009, this software was released under the MIT license.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "recursive-include docs *\nrecursive-include haystack/templates *.xml *.html\ninclude AUTHORS\ninclude LICENSE\ninclude README.rst\n"
  },
  {
    "path": "README.rst",
    "content": ".. image:: https://github.com/django-haystack/django-haystack/actions/workflows/test.yml/badge.svg\n      :target: https://github.com/django-haystack/django-haystack/actions/workflows/test.yml\n.. image:: https://img.shields.io/pypi/v/django-haystack.svg\n      :target: https://pypi.python.org/pypi/django-haystack/\n.. image:: https://img.shields.io/pypi/pyversions/django-haystack.svg\n      :target: https://pypi.python.org/pypi/django-haystack/\n.. image:: https://img.shields.io/pypi/dm/django-haystack.svg\n      :target: https://pypi.python.org/pypi/django-haystack/\n.. image:: https://readthedocs.org/projects/django-haystack/badge/\n      :target: https://django-haystack.readthedocs.io/\n.. image:: https://img.shields.io/badge/code%20style-black-000.svg\n      :target: https://github.com/psf/black\n.. image:: https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336\n      :target: https://pycqa.github.io/isort/\n\n========\nHaystack\n========\n\n:author: Daniel Lindsley\n:date: 2013/07/28\n\nHaystack provides modular search for Django. It features a unified, familiar\nAPI that allows you to plug in different search backends (such as Solr_,\nElasticsearch_, Whoosh_, Xapian_, etc.) without having to modify your code.\n\n.. _Solr: http://lucene.apache.org/solr/\n.. _Elasticsearch: https://www.elastic.co/products/elasticsearch\n.. _Whoosh: https://github.com/mchaput/whoosh/\n.. 
_Xapian: http://xapian.org/\n\nHaystack is BSD licensed, plays nicely with third-party app without needing to\nmodify the source and supports advanced features like faceting, More Like This,\nhighlighting, spatial search and spelling suggestions.\n\nYou can find more information at http://haystacksearch.org/.\n\n\nGetting Help\n============\n\nThere is a mailing list (http://groups.google.com/group/django-haystack/)\navailable for general discussion and an IRC channel (#haystack on\nirc.freenode.net).\n\n\nDocumentation\n=============\n\n* Development version: http://docs.haystacksearch.org/\n* v2.8.X: https://django-haystack.readthedocs.io/en/v2.8.1/\n* v2.7.X: https://django-haystack.readthedocs.io/en/v2.7.0/\n* v2.6.X: https://django-haystack.readthedocs.io/en/v2.6.0/\n\nSee the `changelog <docs/changelog.rst>`_\n\nRequirements\n============\n\nHaystack has a relatively easily-met set of requirements.\n\n* Python 3.5+\n* A supported version of Django: https://www.djangoproject.com/download/#supported-versions\n\nAdditionally, each backend has its own requirements. You should refer to\nhttps://django-haystack.readthedocs.io/en/latest/installing_search_engines.html for more\ndetails.\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nPAPER         =\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n\n.PHONY: help clean html web pickle htmlhelp latex changes linkcheck\n\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  html      to make standalone HTML files\"\n\t@echo \"  pickle    to make pickle files\"\n\t@echo \"  json      to make JSON files\"\n\t@echo \"  htmlhelp  to make HTML files and a HTML help project\"\n\t@echo \"  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  changes   to make an overview over all changed/added/deprecated items\"\n\t@echo \"  linkcheck to check all external links for integrity\"\n\nclean:\n\t-rm -rf _build/*\n\nhtml:\n\tmkdir -p _build/html _build/doctrees\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html\n\t@echo\n\t@echo \"Build finished. 
The HTML pages are in _build/html.\"\n\npickle:\n\tmkdir -p _build/pickle _build/doctrees\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\nweb: pickle\n\njson:\n\tmkdir -p _build/json _build/doctrees\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\nhtmlhelp:\n\tmkdir -p _build/htmlhelp _build/doctrees\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in _build/htmlhelp.\"\n\nlatex:\n\tmkdir -p _build/latex _build/doctrees\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in _build/latex.\"\n\t@echo \"Run \\`make all-pdf' or \\`make all-ps' in that directory to\" \\\n\t      \"run these through (pdf)latex.\"\n\nchanges:\n\tmkdir -p _build/changes _build/doctrees\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes\n\t@echo\n\t@echo \"The overview file is in _build/changes.\"\n\nlinkcheck:\n\tmkdir -p _build/linkcheck _build/doctrees\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in _build/linkcheck/output.txt.\"\n\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) _build/epub\n\t@echo\n\t@echo \"Build finished. The epub file is in _build/epub.\"\n"
  },
  {
    "path": "docs/_static/.gitignore",
    "content": ""
  },
  {
    "path": "docs/_templates/.gitignore",
    "content": ""
  },
  {
    "path": "docs/admin.rst",
    "content": ".. _ref-admin:\n\n===================\nDjango Admin Search\n===================\n\nHaystack comes with a base class to support searching via Haystack in the\nDjango admin. To use Haystack to search, inherit from ``haystack.admin.SearchModelAdmin``\ninstead of ``django.contrib.admin.ModelAdmin``.\n\nFor example::\n\n    from haystack.admin import SearchModelAdmin\n    from .models import MockModel\n\n\n    class MockModelAdmin(SearchModelAdmin):\n        haystack_connection = 'solr'\n        date_hierarchy = 'pub_date'\n        list_display = ('author', 'pub_date')\n\n\n    admin.site.register(MockModel, MockModelAdmin)\n\nYou can also specify the Haystack connection used by the search with the\n``haystack_connection`` property on the model admin class. If not specified,\nthe default connection will be used.\n\nIf you already have a base model admin class you use, there is also a mixin\nyou can use instead::\n\n    from django.contrib import admin\n    from haystack.admin import SearchModelAdminMixin\n    from .models import MockModel\n\n\n    class MyCustomModelAdmin(admin.ModelAdmin):\n        pass\n\n\n    class MockModelAdmin(SearchModelAdminMixin, MyCustomModelAdmin):\n        haystack_connection = 'solr'\n        date_hierarchy = 'pub_date'\n        list_display = ('author', 'pub_date')\n\n\n    admin.site.register(MockModel, MockModelAdmin)\n"
  },
  {
    "path": "docs/architecture_overview.rst",
    "content": ".. _ref-architecture-overview:\n\n=====================\nArchitecture Overview\n=====================\n\n``SearchQuerySet``\n------------------\n\nOne main implementation.\n\n* Standard API that loosely follows ``QuerySet``\n* Handles most queries\n* Allows for custom \"parsing\"/building through API\n* Dispatches to ``SearchQuery`` for actual query\n* Handles automatically creating a query\n* Allows for raw queries to be passed straight to backend.\n\n\n``SearchQuery``\n---------------\n\nImplemented per-backend.\n\n* Method for building the query out of the structured data.\n* Method for cleaning a string of reserved characters used by the backend.\n\nMain class provides:\n\n* Methods to add filters/models/order-by/boost/limits to the search.\n* Method to perform a raw search.\n* Method to get the number of hits.\n* Method to return the results provided by the backend (likely not a full list).\n\n\n``SearchBackend``\n-----------------\n\nImplemented per-backend.\n\n* Connects to search engine\n* Method for saving new docs to index\n* Method for removing docs from index\n* Method for performing the actual query\n\n\n``SearchSite``\n--------------\n\nOne main implementation.\n\n* Standard API that loosely follows ``django.contrib.admin.sites.AdminSite``\n* Handles registering/unregistering models to search on a per-site basis.\n* Provides a means of adding custom indexes to a model, like ``ModelAdmins``.\n\n\n``SearchIndex``\n---------------\n\nImplemented per-model you wish to index.\n\n* Handles generating the document to be indexed.\n* Populates additional fields to accompany the document.\n* Provides a way to limit what types of objects get indexed.\n* Provides a way to index the document(s).\n* Provides a way to remove the document(s).\n"
  },
  {
    "path": "docs/autocomplete.rst",
    "content": ".. _ref-autocomplete:\n\n============\nAutocomplete\n============\n\nAutocomplete is becoming increasingly common as an add-on to search. Haystack\nmakes it relatively simple to implement. There are two steps in the process,\none to prepare the data and one to implement the actual search.\n\nStep 1. Setup The Data\n======================\n\nTo do autocomplete effectively, the search backend uses n-grams (essentially\na small window passed over the string). Because this alters the way your\ndata needs to be stored, the best approach is to add a new field to your\n``SearchIndex`` that contains the text you want to autocomplete on.\n\nYou have two choices: ``NgramField`` and ``EdgeNgramField``. Though very similar,\nthe choice of field is somewhat important.\n\n* If you're working with standard text, ``EdgeNgramField`` tokenizes on\n  whitespace. This prevents incorrect matches when part of two different words\n  are mashed together as one n-gram. **This is what most users should use.**\n* If you're working with Asian languages or want to be able to autocomplete\n  across word boundaries, ``NgramField`` should be what you use.\n\nExample (continuing from the tutorial)::\n\n    import datetime\n    from haystack import indexes\n    from myapp.models import Note\n\n\n    class NoteIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        author = indexes.CharField(model_attr='user')\n        pub_date = indexes.DateTimeField(model_attr='pub_date')\n        # We add this for autocomplete.\n        content_auto = indexes.EdgeNgramField(model_attr='content')\n\n        def get_model(self):\n            return Note\n\n        def index_queryset(self, using=None):\n            \"\"\"Used when the entire index for model is updated.\"\"\"\n            return Note.objects.filter(pub_date__lte=datetime.datetime.now())\n\nAs with all schema changes, you'll need to rebuild/update your index 
after\nmaking this change.\n\n\nStep 2. Performing The Query\n============================\n\nHaystack ships with a convenience method to perform most autocomplete searches.\nYou simply provide a field and the query you wish to search on to the\n``SearchQuerySet.autocomplete`` method. Given the previous example, an example\nsearch would look like::\n\n    from haystack.query import SearchQuerySet\n\n    SearchQuerySet().autocomplete(content_auto='old')\n    # Result match things like 'goldfish', 'cuckold' and 'older'.\n\nThe results from the ``SearchQuerySet.autocomplete`` method are full search\nresults, just like any regular filter.\n\nIf you need more control over your results, you can use standard\n``SearchQuerySet.filter`` calls. For instance::\n\n    from haystack.query import SearchQuerySet\n\n    sqs = SearchQuerySet().filter(content_auto=request.GET.get('q', ''))\n\nThis can also be extended to use ``SQ`` for more complex queries (and is what's\nbeing done under the hood in the ``SearchQuerySet.autocomplete`` method).\n\n\nExample Implementation\n======================\n\nThe above is the low-level backend portion of how you implement autocomplete.\nTo make it work in browser, you need both a view to run the autocomplete\nand some Javascript to fetch the results.\n\nSince it comes up often, here is an example implementation of those things.\n\n.. warning::\n\n    This code comes with no warranty. Don't ask for support on it. If you\n    copy-paste it and it burns down your server room, I'm not liable for any\n    of it.\n\n    It worked this one time on my machine in a simulated environment.\n\n    And yeah, semicolon-less + 2 space + comma-first. 
Deal with it.\n\nA stripped-down view might look like::\n\n    # views.py\n    import simplejson as json\n    from django.http import HttpResponse\n    from haystack.query import SearchQuerySet\n\n\n    def autocomplete(request):\n        sqs = SearchQuerySet().autocomplete(content_auto=request.GET.get('q', ''))[:5]\n        suggestions = [result.title for result in sqs]\n        # Make sure you return a JSON object, not a bare list.\n        # Otherwise, you could be vulnerable to an XSS attack.\n        the_data = json.dumps({\n            'results': suggestions\n        })\n        return HttpResponse(the_data, content_type='application/json')\n\nThe template might look like::\n\n    <!DOCTYPE html>\n    <html>\n    <head>\n      <meta charset=\"utf-8\">\n      <title>Autocomplete Example</title>\n    </head>\n    <body>\n      <h1>Autocomplete Example</h1>\n\n      <form method=\"post\" action=\"/search/\" class=\"autocomplete-me\">\n        <input type=\"text\" id=\"id_q\" name=\"q\">\n        <input type=\"submit\" value=\"Search!\">\n      </form>\n\n      <script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js\"></script>\n      <script type=\"text/javascript\">\n        // In a perfect world, this would be its own library file that got included\n        // on the page and only the ``$(document).ready(...)`` below would be present.\n        // But this is an example.\n        var Autocomplete = function(options) {\n          this.form_selector = options.form_selector\n          this.url = options.url || '/search/autocomplete/'\n          this.delay = parseInt(options.delay || 300)\n          this.minimum_length = parseInt(options.minimum_length || 3)\n          this.form_elem = null\n          this.query_box = null\n        }\n\n        Autocomplete.prototype.setup = function() {\n          var self = this\n\n          this.form_elem = $(this.form_selector)\n          this.query_box = this.form_elem.find('input[name=q]')\n\n          
// Watch the input box.\n          this.query_box.on('keyup', function() {\n            var query = self.query_box.val()\n\n            if(query.length < self.minimum_length) {\n              return false\n            }\n\n            self.fetch(query)\n          })\n\n          // On selecting a result, populate the search field.\n          this.form_elem.on('click', '.ac-result', function(ev) {\n            self.query_box.val($(this).text())\n            $('.ac-results').remove()\n            return false\n          })\n        }\n\n        Autocomplete.prototype.fetch = function(query) {\n          var self = this\n\n          $.ajax({\n            url: this.url\n          , data: {\n              'q': query\n            }\n          , success: function(data) {\n              self.show_results(data)\n            }\n          })\n        }\n\n        Autocomplete.prototype.show_results = function(data) {\n          // Remove any existing results.\n          $('.ac-results').remove()\n\n          var results = data.results || []\n          var results_wrapper = $('<div class=\"ac-results\"></div>')\n          var base_elem = $('<div class=\"result-wrapper\"><a href=\"#\" class=\"ac-result\"></a></div>')\n\n          if(results.length > 0) {\n            for(var res_offset in results) {\n              var elem = base_elem.clone()\n              // Don't use .html(...) 
here, as you open yourself to XSS.\n              // Really, you should use some form of templating.\n              elem.find('.ac-result').text(results[res_offset])\n              results_wrapper.append(elem)\n            }\n          }\n          else {\n            var elem = base_elem.clone()\n            elem.text(\"No results found.\")\n            results_wrapper.append(elem)\n          }\n\n          this.query_box.after(results_wrapper)\n        }\n\n        $(document).ready(function() {\n          window.autocomplete = new Autocomplete({\n            form_selector: '.autocomplete-me'\n          })\n          window.autocomplete.setup()\n        })\n      </script>\n    </body>\n    </html>\n"
  },
  {
    "path": "docs/backend_support.rst",
    "content": ".. _ref-backend-support:\n\n===============\nBackend Support\n===============\n\n\nSupported Backends\n==================\n\n* Solr_\n* ElasticSearch_\n* Whoosh_\n* Xapian_\n\n.. _Solr: http://lucene.apache.org/solr/\n.. _ElasticSearch: http://elasticsearch.org/\n.. _Whoosh: https://github.com/mchaput/whoosh/\n.. _Xapian: http://xapian.org/\n\n\nBackend Capabilities\n====================\n\nSolr\n----\n\n**Complete & included with Haystack.**\n\n* Full SearchQuerySet support\n* Automatic query building\n* \"More Like This\" functionality\n* Term Boosting\n* Faceting\n* Stored (non-indexed) fields\n* Highlighting\n* Spatial search\n* Requires: pysolr (2.0.13+) & Solr 3.5+\n\nElasticSearch\n-------------\n\n**Complete & included with Haystack.**\n\n* Full SearchQuerySet support\n* Automatic query building\n* \"More Like This\" functionality\n* Term Boosting\n* Faceting (up to 100 facets)\n* Stored (non-indexed) fields\n* Highlighting\n* Spatial search\n* Requires: `elasticsearch-py <https://pypi.python.org/pypi/elasticsearch>`_ 1.x, 2.x, or 5.X.\n\nWhoosh\n------\n\n**Complete & included with Haystack.**\n\n* Full SearchQuerySet support\n* Automatic query building\n* \"More Like This\" functionality\n* Term Boosting\n* Stored (non-indexed) fields\n* Highlighting\n* Requires: whoosh (2.0.0+)\n* Per-field analyzers\n\nXapian\n------\n\n**Complete & available as a third-party download.**\n\n* Full SearchQuerySet support\n* Automatic query building\n* \"More Like This\" functionality\n* Term Boosting\n* Faceting\n* Stored (non-indexed) fields\n* Highlighting\n* Requires: Xapian 1.0.5+ & python-xapian 1.0.5+\n* Backend can be downloaded here: `xapian-haystack <http://github.com/notanumber/xapian-haystack/>`__\n\nBackend Support Matrix\n======================\n\n+----------------+------------------------+---------------------+----------------+------------+----------+---------------+--------------+---------+\n| Backend        | SearchQuerySet Support | Auto 
Query Building | More Like This | Term Boost | Faceting | Stored Fields | Highlighting | Spatial |\n+================+========================+=====================+================+============+==========+===============+==============+=========+\n| Solr           | Yes                    | Yes                 | Yes            | Yes        | Yes      | Yes           | Yes          | Yes     |\n+----------------+------------------------+---------------------+----------------+------------+----------+---------------+--------------+---------+\n| ElasticSearch  | Yes                    | Yes                 | Yes            | Yes        | Yes      | Yes           | Yes          | Yes     |\n+----------------+------------------------+---------------------+----------------+------------+----------+---------------+--------------+---------+\n| Whoosh         | Yes                    | Yes                 | Yes            | Yes        | No       | Yes           | Yes          | No      |\n+----------------+------------------------+---------------------+----------------+------------+----------+---------------+--------------+---------+\n| Xapian         | Yes                    | Yes                 | Yes            | Yes        | Yes      | Yes           | Yes (plugin) | No      |\n+----------------+------------------------+---------------------+----------------+------------+----------+---------------+--------------+---------+\n\n\nUnsupported Backends & Alternatives\n===================================\n\nIf you have a search engine which you would like to see supported in Haystack, the current recommendation is\nto develop a plugin following the lead of `xapian-haystack <https://pypi.python.org/pypi/xapian-haystack>`_ so\nthat project can be developed and tested independently of the core Haystack release schedule.\n\nSphinx\n------\n\nThis backend has been requested multiple times over the years but does not yet have a volunteer maintainer. 
If\nyou would like to work on it, please contact the Haystack maintainers so your project can be linked here and,\nif desired, added to the `django-haystack <https://github.com/django-haystack/>`_ organization on GitHub.\n\nIn the meantime, Sphinx users should consider Jorge C. Leitão's\n`django-sphinxql <https://github.com/jorgecarleitao/django-sphinxql>`_ project.\n"
  },
  {
    "path": "docs/best_practices.rst",
    "content": ".. _ref-best-practices:\n\n==============\nBest Practices\n==============\n\nWhat follows are some general recommendations on how to improve your search.\nSome tips represent performance benefits, some provide a better search index.\nYou should evaluate these options for yourself and pick the ones that will\nwork best for you. Not all situations are created equal and many of these\noptions could be considered mandatory in some cases and unnecessary premature\noptimizations in others. Your mileage may vary.\n\n\nGood Search Needs Good Content\n==============================\n\nMost search engines work best when they're given corpuses with predominantly\ntext (as opposed to other data like dates, numbers, etc.) in decent quantities\n(more than a couple words). This is in stark contrast to the databases most\npeople are used to, which rely heavily on non-text data to create relationships\nand for ease of querying.\n\nTo this end, if search is important to you, you should take the time to\ncarefully craft your ``SearchIndex`` subclasses to give the search engine the\nbest information you can. This isn't necessarily hard but is worth the\ninvestment of time and thought. Assuming you've only ever used the\n``BasicSearchIndex``, in creating custom ``SearchIndex`` classes, there are\nsome easy improvements to make that will make your search better:\n\n* For your ``document=True`` field, use a well-constructed template.\n* Add fields for data you might want to be able to filter by.\n* If the model has related data, you can squash good content from those\n  related models into the parent model's ``SearchIndex``.\n* Similarly, if you have heavily de-normalized models, it may be best\n  represented by a single indexed model rather than many indexed models.\n\nWell-Constructed Templates\n--------------------------\n\nA relatively unique concept in Haystack is the use of templates associated with\n``SearchIndex`` fields. 
These are data templates, will never be seen by users\nand ideally contain no HTML. They are used to collect various data from the\nmodel and structure it as a document for the search engine to analyze and index.\n\n.. note::\n\n    If you read nothing else, this is the single most important thing you can\n    do to make search on your site better for your users. Good templates can\n    make or break your search and providing the search engine with good content\n    to index is critical.\n\nGood templates structure the data well and incorporate as much pertinent text\nas possible. This may include additional fields such as titles, author\ninformation, metadata, tags/categories. Without being artificial, you want to\nconstruct as much context as you can. This doesn't mean you should necessarily\ninclude every field, but you should include fields that provide good content\nor include terms you think your users may frequently search on.\n\nUnless you have very unique numbers or dates, neither of these types of data\nare a good fit within templates. They are usually better suited to other\nfields for filtering within a ``SearchQuerySet``.\n\nAdditional Fields For Filtering\n-------------------------------\n\nDocuments by themselves are good for generating indexes of content but are\ngenerally poor for filtering content, for instance, by date. All search engines\nsupported by Haystack provide a means to associate extra data as\nattributes/fields on a record. The database analogy would be adding extra\ncolumns to the table for filtering.\n\nGood candidates here are date fields, number fields, de-normalized data from\nrelated objects, etc. You can expose these things to users in the form of a\ncalendar range to specify, an author to look up or only data from a certain\nseries of numbers to return.\n\nYou will need to plan ahead and anticipate what you might need to filter on,\nthough with each field you add, you increase storage space usage. 
It's generally\n**NOT** recommended to include every field from a model, just ones you are\nlikely to use.\n\nRelated Data\n------------\n\nRelated data is somewhat problematic to deal with, as most search engines are\nbetter with documents than they are with relationships. One way to approach this\nis to de-normalize a related child object or objects into the parent's document\ntemplate. The inclusion of a foreign key's relevant data or a simple Django\n``{% for %}`` templatetag to iterate over the related objects can increase the\nsalient data in your document. Be careful what you include and how you structure\nit, as this can have consequences on how well a result might rank in your\nsearch.\n\n\nAvoid Hitting The Database\n==========================\n\nA very easy but effective thing you can do to drastically reduce hits on the\ndatabase is to pre-render your search results using stored fields then disabling\nthe ``load_all`` aspect of your ``SearchView``.\n\n.. warning::\n\n    This technique may cause a substantial increase in the size of your index\n    as you are basically using it as a storage mechanism.\n\nTo do this, you setup one or more stored fields (`indexed=False`) on your\n``SearchIndex`` classes. You should specify a template for the field, filling it\nwith the data you'd want to display on your search results pages. When the model\nattached to the ``SearchIndex`` is placed in the index, this template will get\nrendered and stored in the index alongside the record.\n\n.. note::\n\n    The downside of this method is that the HTML for the result will be locked\n    in once it is indexed. To make changes to the structure, you'd have to\n    reindex all of your content. It also limits you to a single display of the\n    content (though you could use multiple fields if that suits your needs).\n\nThe second aspect is customizing your ``SearchView`` and its templates. 
First,\npass the ``load_all=False`` to your ``SearchView``, ideally in your URLconf.\nThis prevents the ``SearchQuerySet`` from loading all model objects for results\nahead of time. Then, in your template, simply display the stored content from\nyour ``SearchIndex`` as the HTML result.\n\n.. warning::\n\n    To do this, you must absolutely avoid using ``{{ result.object }}`` or any\n    further accesses beyond that. That call will hit the database, not only\n    nullifying your work on lessening database hits, but actually making it\n    worse as there will now be at least one query for each result, up from a\n    single query for each type of model with ``load_all=True``.\n\n\nContent-Type Specific Templates\n===============================\n\nFrequently, when displaying results, you'll want to customize the HTML output\nbased on what model the result represents.\n\nIn practice, the best way to handle this is through the use of ``include``\nalong with the data on the ``SearchResult``.\n\nYour existing loop might look something like::\n\n    {% for result in page.object_list %}\n        <p>\n            <a href=\"{{ result.object.get_absolute_url }}\">{{ result.object.title }}</a>\n        </p>\n    {% empty %}\n        <p>No results found.</p>\n    {% endfor %}\n\nAn improved version might look like::\n\n    {% for result in page.object_list %}\n        {% if result.content_type == \"blog.post\" %}\n        {% include \"search/includes/blog/post.html\" %}\n        {% endif %}\n        {% if result.content_type == \"media.photo\" %}\n        {% include \"search/includes/media/photo.html\" %}\n        {% endif %}\n    {% empty %}\n        <p>No results found.</p>\n    {% endfor %}\n\nThose include files might look like::\n\n    # search/includes/blog/post.html\n    <div class=\"post_result\">\n        <h3><a href=\"{{ result.object.get_absolute_url }}\">{{ result.object.title }}</a></h3>\n\n        <p>{{ result.object.tease }}</p>\n    </div>\n\n    # 
search/includes/media/photo.html\n    <div class=\"photo_result\">\n        <a href=\"{{ result.object.get_absolute_url }}\">\n        <img src=\"http://your.media.example.com/media/{{ result.object.photo.url }}\"></a>\n        <p>Taken By {{ result.object.taken_by }}</p>\n    </div>\n\nYou can make this even better by standardizing on an includes layout, then\nwriting a template tag or filter that generates the include filename. Usage\nmight look something like::\n\n    {% for result in page.object_list %}\n        {% with result|search_include as fragment %}\n        {% include fragment %}\n        {% endwith %}\n    {% empty %}\n        <p>No results found.</p>\n    {% endfor %}\n\n\nReal-Time Search\n================\n\nIf your site sees heavy search traffic and up-to-date information is very\nimportant, Haystack provides a way to constantly keep your index up to date.\n\nYou can enable the ``RealtimeSignalProcessor`` within your settings, which\nwill allow Haystack to automatically update the index whenever a model is\nsaved/deleted.\n\nYou can find more information within the :doc:`signal_processors` documentation.\n\n\nUse Of A Queue For A Better User Experience\n===========================================\n\nBy default, when you reindex content, Haystack immediately tries to merge\nit into the search index. If you have a write-heavy site, this could mean your\nsearch engine may spend most of its time churning on constant merges. If you can\nafford a small delay between when a model is saved and when it appears in the\nsearch results, queuing these merges is a good idea.\n\nYou gain a snappier interface for users as updates go into a queue (a fast\noperation) and then typical processing continues. You also get a lower churn\nrate, as most search engines deal with batches of updates better than many\nsingle updates. 
You can also use this to distribute load, as the queue consumer\ncould live on a completely separate server from your webservers, allowing you\nto tune more efficiently.\n\nImplementing this is relatively simple. There are two parts, creating a new\n``QueuedSignalProcessor`` class and creating a queue processing script to\nhandle the actual updates.\n\nFor the ``QueuedSignalProcessor``, you should inherit from\n``haystack.signals.BaseSignalProcessor``, then alter the ``setup/teardown``\nmethods to call an enqueuing method instead of directly calling\n``handle_save/handle_delete``. For example::\n\n    from haystack import signals\n\n\n    class QueuedSignalProcessor(signals.BaseSignalProcessor):\n        # Override the built-in.\n        def setup(self):\n            models.signals.post_save.connect(self.enqueue_save)\n            models.signals.post_delete.connect(self.enqueue_delete)\n\n        # Override the built-in.\n        def teardown(self):\n            models.signals.post_save.disconnect(self.enqueue_save)\n            models.signals.post_delete.disconnect(self.enqueue_delete)\n\n        # Add on a queuing method.\n        def enqueue_save(self, sender, instance, **kwargs):\n            # Push the save & information onto queue du jour here\n            ...\n\n        # Add on a queuing method.\n        def enqueue_delete(self, sender, instance, **kwargs):\n            # Push the delete & information onto queue du jour here\n            ...\n\nFor the consumer, this is much more specific to the queue used and your desired\nsetup. At a minimum, you will need to periodically consume the queue, fetch the\ncorrect index from the ``SearchSite`` for your application, load the model from\nthe message and pass that model to the ``update_object`` or ``remove_object``\nmethods on the ``SearchIndex``. Proper grouping, batching and intelligent\nhandling are all additional things that could be applied on top to further\nimprove performance.\n"
  },
  {
    "path": "docs/boost.rst",
    "content": ".. _ref-boost:\n\n=====\nBoost\n=====\n\n\nScoring is a critical component of good search. Normal full-text searches\nautomatically score a document based on how well it matches the query provided.\nHowever, sometimes you want certain documents to score better than they\notherwise would. Boosting is a way to achieve this. There are three types of\nboost:\n\n* Term Boost\n* Document Boost\n* Field Boost\n\n.. note::\n\n    Document & Field boost support was added in Haystack 1.1.\n\nDespite all being types of boost, they take place at different times and have\nslightly different effects on scoring.\n\nTerm boost happens at query time (when the search query is run) and is based\naround increasing the score if a certain word/phrase is seen.\n\nOn the other hand, document & field boosts take place at indexing time (when\nthe document is being added to the index). Document boost causes the relevance\nof the entire result to go up, where field boost causes only searches within\nthat field to do better.\n\n.. warning::\n\n  Be warned that boost is very, very sensitive & can hurt overall search\n  quality if over-zealously applied. Even very small adjustments can affect\n  relevance in a big way.\n\nTerm Boost\n==========\n\nTerm boosting is achieved by using ``SearchQuerySet.boost``. You provide it\nthe term you want to boost on & a floating point value (based around ``1.0``\nas 100% - no boost).\n\nExample::\n\n    # Slight increase in relevance for documents that include \"banana\".\n    sqs = SearchQuerySet().boost('banana', 1.1)\n\n    # Big decrease in relevance for documents that include \"blueberry\".\n    sqs = SearchQuerySet().boost('blueberry', 0.8)\n\nSee the :doc:`searchqueryset_api` docs for more details on using this method.\n\n\nDocument Boost\n==============\n\nDocument boosting is done by adding a ``boost`` field to the prepared data\n``SearchIndex`` creates. 
The best way to do this is to override\n``SearchIndex.prepare``::\n\n    from haystack import indexes\n    from notes.models import Note\n\n\n    class NoteSearchIndex(indexes.SearchIndex, indexes.Indexable):\n        # Your regular fields here then...\n\n        def prepare(self, obj):\n            data = super(NoteSearchIndex, self).prepare(obj)\n            data['boost'] = 1.1\n            return data\n\n\nAnother approach might be to add a new field called ``boost``. However, this\ncan skew your schema and is not encouraged.\n\n\nField Boost\n===========\n\nField boosting is enabled by setting the ``boost`` kwarg on the desired field.\nAn example of this might be increasing the significance of a ``title``::\n\n    from haystack import indexes\n    from notes.models import Note\n\n\n    class NoteSearchIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        title = indexes.CharField(model_attr='title', boost=1.125)\n\n        def get_model(self):\n            return Note\n\n.. note::\n\n  Field boosting only has an effect when the SearchQuerySet filters on the\n  field which has been boosted. If you are using a default search view or\n  form you will need to override the search method or otherwise include the\n  field in your search query. This example CustomSearchForm searches the\n  automatic ``content`` field and the ``title`` field which has been boosted::\n\n    from haystack.forms import SearchForm\n\n    class CustomSearchForm(SearchForm):\n\n        def search(self):\n            if not self.is_valid():\n                return self.no_query_found()\n\n            if not self.cleaned_data.get('q'):\n                return self.no_query_found()\n\n            q = self.cleaned_data['q']\n            sqs = self.searchqueryset.filter(SQ(content=AutoQuery(q)) | SQ(title=AutoQuery(q)))\n\n            if self.load_all:\n                sqs = sqs.load_all()\n\n            return sqs.highlight()\n"
  },
  {
    "path": "docs/changelog.rst",
    "content": "Changelog\n=========\n\n\n%%version%% (unreleased)\n------------------------\n- Docs: don't tell people how to install Python packages. [Chris Adams]\n\n  It's 2018, \"pip install <packagename>\" is the only thing we should\n  volunteer.\n- Update Elasticsearch documentation. [Chris Adams]\n\n  * Add 5.x to supported versions\n  * Replace configuration and installation information with\n    pointers to the official docs\n  * Stop mentioning pyes since it’s fallen behind the official\n    client in awareness\n  * Don’t tell people how to install Python packages\n- Fix get_coords() calls. [Chris Adams]\n- Update README & contributor guide. [Chris Adams]\n- Blacken. [Chris Adams]\n- Isort everything. [Chris Adams]\n- Update code style settings. [Chris Adams]\n\n  Prep for Blackening\n- Remove PyPy / Django 2 targets. [Chris Adams]\n\n  We'll restore these when pypy3 is more mainstream\n- Use default JRE rather than requiring Oracle. [Chris Adams]\n\n  OpenJDK is also supported and that does not require accepting a license.\n- Changed ES5.x test skip message to match the friendlier 2.x one.\n  [Bruno Marques]\n- Fixed faceted search and autocomplete test. [Bruno Marques]\n- Removed ES5 code that actually never runs. [Bruno Marques]\n- Fixed kwargs in ES5's build_search_query. [Bruno Marques]\n- ES5: fixed MLT, within and dwithin. [Bruno Marques]\n- Assorted ES5.x fixes. [Bruno Marques]\n- Re-added sorting, highlighting and suggesting to ES5.x backend. [Bruno\n  Marques]\n- Fixed filters and fuzziness on ES5.x backend. [Bruno Marques]\n- Added Java 8 to Travis dependencies. [Bruno Marques]\n- Started Elasticsearch 5.x support. [Bruno Marques]\n- Style change to avoid ternary logic on the end of a line. [Chris\n  Adams]\n\n  This is unchanged from #1475 but avoids logic at the end of the line\n- Do not raise when model cannot be searched. [benvand]\n\n  * Return empty string.\n  * Test.\n- Merge pull request #1616 from hornn/batch_order. 
[Chris Adams]\n\n  Order queryset by pk in update batching\n- Order queryset by pk in update batching This solves #1615. [Noa Horn]\n\n  The queryset is not ordered by pk by default, however the batching filter relies on the results being ordered.\n  When the results are not ordered by pk, some objects are not indexed.\n  This can happen when the underlying database doesn't have default ordering by pk, or when the model or index_queryset() have a different ordering.\n- Merge pull request #1612 from hornn/patch-1. [Chris Adams]\n\n  Construct django_ct based on model instead of object\n- Update indexes.py. [Noa Horn]\n\n  Construct django_ct based on model instead of object.\n  This solves issue #1611 - delete stale polymorphic model documents.\n- Merge pull request #1610 from erez-o/patch-1. [Chris Adams]\n\n  Update installing_search_engines.rst\n- Update installing_search_engines.rst. [Chris Adams]\n- Update installing_search_engines.rst. [Erez Oxman]\n\n  Updated docs about Solr 6.X+ \"More like this\"\n- Avoid UnicodeDecodeError when an error occurs while resolving\n  attribute lookups. [Chris Adams]\n\n  Thanks to Martin Burchell (@martinburchell) for the patch in #1599\n- Fix UnicodeDecodeError in error message. [Martin Burchell]\n\n  Because of the way the default __repr__ works in Django models, we can get a\n  UnicodeDecodeError when creating the SearchFieldError if a model does not have\n  an attribute. eg:\n  UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 11: ordinal not in range(128)\n  and this hides the real problem.\n\n  I have left alone the other SearchFieldError in this method because current_obj is always\n  None. The error message is a bit strange in this case but it won't suffer from the same problem.\n- Add max retries option to rebuild_index, matching update_index. [Chris\n  Adams]\n\n  Thanks to @2miksyn for the patch in #1598\n- Update rebuild_index.py. 
[2miksyn]\n\n  Add max-retries argument to rebuild_index managment command. This is useful for debug at development time\n\n  Add Django 2.1 compatibility. [Tim Graham]\n\n\nv2.8.1 (2018-03-16)\n-------------------\n- Merge pull request #1596 from klass-ivan/collecting-deep-attr-through-\n  m2m. [Chris Adams]\n\n  Fixed collection of deep attributes through m2m relation\n- Fixed collection of deep attributes through m2m relation. [Ivan Klass]\n\n\nv2.8.0 (2018-03-09)\n-------------------\n- Optimize ElasticSearch backend (closes #1590) [Chris Adams]\n\n  Thanks to @klass-ivan for the patch\n- [elasticsearch backend] - Fixed index re-obtaining for every field.\n  [Ivan Klass]\n- Django 2.0 compatibility (closes #1582) [Chris Adams]\n\n  Thanks to @mpauly and @timgraham for working on this and @dani0805,\n  @andrewbenedictwallace, @rabidcicada, @webtweakers, @nadimtuhin, and\n  @JonLevischi for testing.\n- Implemented TG's review comments. [Martin Pauly]\n- Drop support for old django versions. [Martin Pauly]\n- For some reason the mock needs to return something. [Martin Pauly]\n- Django 2.0 changes to tests. [Martin Pauly]\n- Dropped a few unnecessary interactive=False. [Martin Pauly]\n- Replace get_coords() by coords in more places. [Martin Pauly]\n- Ignore python2 Django2 combination. [Martin Pauly]\n- Drop tests for Django < 1.11. [Martin Pauly]\n- Update requirements in setup.py. [Martin Pauly]\n- Update imports to drop Django 1.8 support. [Martin Pauly]\n- Fix intendation error in tox.ini. [Martin Pauly]\n- Merge https://github.com/django-haystack/django-haystack. [Martin\n  Pauly]\n- Added a test for exclusion of M2M fields for ModelSearchIndex. [Martin\n  Pauly]\n- In Django 2.0 ForeinKeys must have on_delete. [Martin Pauly]\n- Assuming that everyone who wants to run these tests upgrades pysolr.\n  [Martin Pauly]\n- Django 2.0 is not compatible with python 2.7. [Martin Pauly]\n- Deal with tuples and strings. 
[Martin Pauly]\n- Fix a bug due to string __version__ of pysolr. [Martin Pauly]\n- Fix tox. [Martin Pauly]\n- Mocking order. [Martin Pauly]\n- Reverse order. [Martin Pauly]\n- Update test - the interactive kwarg is only passed to the clear_index\n  command. [Martin Pauly]\n- Revert \"Trigger travis build\" [Martin Pauly]\n\n  This reverts commit 7a9ac3824d7c6d5a9de63e4144ccb8c78daf60d6.\n- Trigger travis build. [Martin Pauly]\n- Update authors. [Martin Pauly]\n- Update tests. [Martin Pauly]\n- Update imports. [Martin Pauly]\n- Fix missing attribute rel. [Martin Pauly]\n- Add the corresponding option for update_index. [Martin Pauly]\n- Fix import order. [Martin Pauly]\n- Exclude unused options for call of clear_index and update_index.\n  [Martin Pauly]\n- Merge pull request #1576 from claudep/pep479. [Chris Adams]\n\n  Replaced deprecated StopIteration by simple return\n- Replaced deprecated StopIteration by simple return. [Claude Paroz]\n\n  Compliance to PEP 479.\n- Merge pull request #1588 from bit/patch-1. [Justin Caratzas]\n\n  make BaseInput.__repr__ for in python3\n- Update inputs.py. [bit]\n- Make BaseInput.__repr__ for in python3. [bit]\n\n  remove call to __unicode__\n\n\nv2.7.0 (2018-01-29)\n-------------------\n- Use Python 3-compatible version comparison. [Chris Adams]\n- Add Django 1.11 and Python 3.6 to tox config. [Chris Adams]\n- Tests use pysolr version_info to work on Python 3.6. [Chris Adams]\n- Upgrade dependencies. [Chris Adams]\n- Align haystack's version attributes with pysolr. [Chris Adams]\n\n  __version__ = pkg resource string\n  version_info = more usable tuple\n- Fixed order_by multiple fields in whoosh backend. [Chris Adams]\n\n  Thanks @rjhelms and @TTGmarkad for the patch\n\n  Closes #604\n- Fixed order_by multiple fields in whoosh backend. [Rob Hailman]\n\n  Implemented fix as suggested in issue #604\n- Merge pull request #1551 from RabidCicada/uuid-pk-fix. 
[Chris Adams]\n\n  Uuid pk fix\n- Fixed final bug with test_related_load_all_queryset test. [Kyle Stapp]\n- Fixing errors. [Kyle Stapp]\n- Initial attempt at adding testing framework for uuid models. [Kyle\n  Stapp]\n- Coerce the pk string to the type that matches the models pk object.\n  [Kyle Stapp]\n- Merge pull request #1555 from whyscream/django-pinning. [Chris Adams]\n\n  Fix django version pinning in setup.py\n- Fix django pinning in setup.py. [Tom Hendrikx]\n- Remove unused import. [Chris Adams]\n- Update_index: remove dead variable assignment. [Chris Adams]\n\n  This declaration was meaningless since the value would be unconditionally overwritten by the `total = qs.count()` statement above on the next loop iteration, before anything read the value.\n- PEP-8. [Chris Adams]\n- LocationField.convert() will raise TypeError for unknown inputs.\n  [Chris Adams]\n- Whoosh: prevent more_like_this from hitting an uninitialized variable.\n  [Chris Adams]\n\n  This was uncommon but previously possible\n- Remove dead code from Whoosh backend. [Chris Adams]\n- PEP-8. [Chris Adams]\n- Merge pull request #1526 from RabidCicada/better-default-configs.\n  [Chris Adams]\n\n  Better default configs\n- Comment editing. [Chris Adams]\n- Adding the template updates I forgot. [Kyle Stapp]\n- Merge pull request #1544 from jbzdak/jbzdak-patch. [Chris Adams]\n\n  Update haystack.generic_views.SearchView to handle empty GET requests\n- Update generic_views.py. [Jacek Bzdak]\n\n  Fix for inconsistent  behavior when GET parameters are present.\n- Merge pull request #1541 from alasdairnicol/patch-1. [Chris Adams]\n\n  Add link to 2.5.x docs\n- Add link to 2.5.x docs. [Alasdair Nicol]\n- Updated config setting for solr 6.5. [Jaimin]\n\n  Updated documentation to enable spellcheck for Solr 6.5.\n- Add load_all to the generic views form kwargs. [Alex Tomkins]\n\n  The deprecated views in views.py automatially pass `load_all` to the search form. 
Class based generic views will now match this behaviour.\n- Update who_uses.rst. [davneet4u]\n- Update who_uses.rst. [davneet4u]\n- Added teachoo to sites using. [davneet4u]\n- Merge pull request #1527 from palmeida/patch-1. [Chris Adams]\n\n  Remove extraneous word\n- Remove extraneous word. [Paulo Almeida]\n- Merge pull request #1530 from tomkins/travis-elasticsearch. [Chris\n  Adams]\n\n  Fix elasticsearch installation in travis\n- Fix elasticsearch installation in travis. [Alex Tomkins]\n\n  Recent travis updates installed a later version of elasticsearch by default, so we need to force a downgrade to test the right versions.\n- Changed GeoDjango Link. [Mohit Khandelwal]\n\n  Changed GeoDjango link from geodjango.org to https://docs.djangoproject.com/en/1.11/ref/contrib/gis/\n- Ensure that custom highlighter tests consistently clean up monkey-\n  patches. [Chris Adams]\n\n  This didn't cause problems currently but there's no point in leaving a\n  trap for the future.\n- Prefer full import path for Highlighter. [Chris Adams]\n\n  This maintains compatibility with existing code but updates\n  the docs & tests to use `haystack.utils.highlighting` rather\n  than just `haystack.utils` to import `Highlighter`.\n- PEP-8. [Chris Adams]\n- Update default identifier to support UUID primary keys. [Chris Adams]\n\n  Thanks to @rabidcicada for the patch & tests!\n\n  Closes #1498\n  Closes #1497\n  Closes #1515\n- Merge pull request #1479 from mjl/mjl-issue-1077. [Chris Adams]\n\n  rebuild_index slowdown fix (#1077)\n- Merge remote-tracking branch 'upstream/master' into mjl-issue-1077.\n  [Martin J. Laubach]\n- Merge branch '1504-solr-6-by-default' [Chris Adams]\n- Documentation copy-editing. [Chris Adams]\n- Tidy build_solr_schema help text and exceptions. [Chris Adams]\n- Build_solr_schema: reload should not assume the backend name. [Chris\n  Adams]\n- Attempt to fix on Travis.  
I guess it runs from different directory.\n  [Kyle T Stapp]\n- Cleaner approach based on acdh's comments.  We don't carry around\n  baggage....but I also am not worried that random lines will get\n  inserted into alien future configs. [Kyle T Stapp]\n- Updated docs to add warning about template filename change.  Fixed\n  typo. [Kyle T Stapp]\n- Removed Unnecessary stopword files as requested. [Kyle T Stapp]\n- Updated docs to match new implementation. [Kyle T Stapp]\n- Tidying test suite. [Chris Adams]\n\n  * Remove some test utilities which were only used once\n    or (after refactoring) not at all\n  * PEP-8 cleanup\n- Tidy Solr backend tests. [Chris Adams]\n\n  * Use assertSetEqual for prettier debug output on failure\n  * Whitespace around operators\n- Update build_solr_schema arguments. [Chris Adams]\n\n  * Use longer names for command-line options\n  * Tidy variable names & error messages\n- Tests: better name for Solr-specific management commands. [Chris\n  Adams]\n\n  This makes things like editor open-by-name shortcuts less confusing\n- Update Solr management command tests. [Chris Adams]\n\n  * Use os.path.join for filesystem path construction\n  * PEP-8 variable naming, whitespace\n  * Use assertGreater for str.find checks on rendered XML\n- Solr: ensure that the default document field is always applied. [Chris\n  Adams]\n\n  This is normally moot but newer versions of Solr have deprecated the\n  <defaultSearchField> configuration option and certain Haystack queries\n  may break if you have removed that configuration element.\n- Update Solr spelling suggestion handling. [Chris Adams]\n\n  The support matrix for this is a problem since the Solr response format changes based on the version,\n  configuration, and query parameters (i.e. 
spellcheck.collateExtendedResults) so this is moved into a separate function which logs errors and honors\n  the backend fail silently setting.\n\n  This has been tested using Solr 6.4 and 6.5 with both\n  the regular and collateExtendedResults formats.\n- Addressing Chris' comments on comment style :) >.< [Kyle T Stapp]\n- Addressing Chris' comments on boolean check. [Kyle T Stapp]\n- Moved constants.HAYSTACK_DOCUMENT_FIELD to constants.DOCUMENT_FIELD to\n  follow convention. [Kyle T Stapp]\n- Test Solr launcher updates. [Chris Adams]\n\n  * Ensure the log directory exists\n  * Remove dead code\n  * Remove GC_LOG_OPTS assignments\n- Build_solr_schema tidying. [Chris Adams]\n\n  * Construct filesystem paths using `os.path`\n  * Remove need to use `traceback`\n  * Avoid dealing with HTTP request URL encoding\n- Build_solr_schema: less abbreviated keyword argument name. [Chris\n  Adams]\n- Tidy imports. [Chris Adams]\n- PEP-8. [Chris Adams]\n- PEP-8. [Chris Adams]\n- Remove unused imports. [Chris Adams]\n- Run isort on files updated in this branch. [Chris Adams]\n- Merge and deconflict of upstream PEP8 changes. [Kyle T Stapp]\n- PEP8 Fixes.  Mostly ignoring line length PEP violations due to\n  conciseness of assertStatements. [Kyle T Stapp]\n- Python 3 compatibility updates. [Kyle T Stapp]\n- Allow overriding collate for spellcheck at most entrypoints that\n  accept kwargs (search mlt etc).  get_spelling_suggestions() will need\n  to be updated. [Kyle T Stapp]\n- Fixing a problem introduced in build_template. [Kyle T Stapp]\n- Working template management and tests.  Lots of plumbing to test.\n  More tests to come soon. [Kyle T Stapp]\n- Final Fixes to support 6.4.0 and 6.5.0 spelling suggestions. [Kyle T\n  Stapp]\n- Thinking solr versoin is wrong. [Kyle T Stapp]\n- Printing raw response that I found existed:) [Kyle T Stapp]\n- More troubleshooting and fixing old test back to original check. [Kyle\n  T Stapp]\n- More troubleshooting. 
[Kyle T Stapp]\n- Fix wrong object in test for spelling suggestions. [Kyle T Stapp]\n- More troubleshooting. [Kyle T Stapp]\n- More troubleshooting. [Kyle T Stapp]\n- Troubleshooting travis failure that is not replicatable here. [Kyle T\n  Stapp]\n- Adjusting matrix to include django 1.11.  Adjusting wait_for_solr\n  script to try to ping correct location.  Adding ping handler. [Kyle T\n  Stapp]\n- Trying to get a travis platform that supports jdk setting. [Kyle T\n  Stapp]\n- Attempting to get travis to see jdk8 request. [Kyle T Stapp]\n- Fix result_class swap failure. [Kyle T Stapp]\n- Fix Collation based results.  Add future plumbing for returning more\n  than one 'suggestion' but keep current behavior. Update schema\n  definition to get rid of _text_ [Kyle T Stapp]\n- Fix LiveSolrSearchQueryTestCase.  Specifically spellcheck.  Added\n  spellcheck to select requestHandler and fixed parsing changes needed\n  in core on our side. [Kyle T Stapp]\n- Fix LiveSolrMoreLikeThisTestCase. Also fix the deferred case (whoops)\n  [Kyle T Stapp]\n- Fix LiveSolrMoreLikeThisTestCase. [Kyle T Stapp]\n- Fixed LiveSolrAutocompleteTestCase Failure. [Kyle T Stapp]\n- Fixed LiveSolrContentExtractionTestCase Failure.  Reworked core\n  creation and configuration a little. [Kyle T Stapp]\n- Reworked start-solr-test-server to work with modern solr.  Reworked\n  solr spinup to create a default core using predefined config in\n  server/confdir. [Kyle T Stapp]\n- Update solr template to be solr6 compatible. [Kyle T Stapp]\n- Fix to tests to run with context dicts instead of context objects for\n  django 1.10. [Kyle T Stapp]\n- Fix django template context passing. [Kyle T Stapp]\n- Merge pull request #1500 from rafaelhdr/master. [Chris Adams]\n\n  Updated tutorial URL configuration example\n- Updated README for CKEditor URL include. [Rafael]\n- Management command update_index: Use last seen max pk for selecting\n  batch starting point. [Martin J. 
Laubach]\n\n  This fixes (or at least mitigates) issue #1077 for the synchronous update case.\n\n\nv2.6.1 (2017-05-15)\n-------------------\n- PEP-8. [Chris Adams]\n- Update SearchBackend.update signature to match implementations. [Chris\n  Adams]\n\n  Every actual SearchBackend implementation had this but the base class\n  did not and that could cause confusion for external projects - e.g.\n\n  https://github.com/notanumber/xapian-haystack/commit/d3f1e011da3d9bebd88c78fe7a87cd6171ae650c\n- Update SearchIndex get_backend API (closes #663) [Chris Adams]\n\n  Make _get_backend a proper public method since it’s\n  recommended by at least one part of the documentation.\n- Extract_file_contents will pass extra keyword arguments to pysolr\n  (#1505) [Chris Adams]\n\n  Thanks to @guglielmo for the patch\n- Extract_file_contents accept extra arguments. [Guglielmo Celata]\n\n  so that it may be used to extract content in textual format, instead of using XML, for example\n- PEP-8 line-lengths and whitespace. [Chris Adams]\n- Better handling of empty lists in field preparation. [Chris Adams]\n\n  Merge pull request #1369 from janwin/fix-empty-list-convert\n- Cherrypick Terr/django-\n  haystack/commit/45293cafbed0ef6aeb145ce55573eb32b1e4981f. [janpleines]\n- Make empty lists return null or default. [janpleines]\n- Merge pull request #1483 from barseghyanartur/patch-1. [Chris Adams]\n\n  Update tutorial.rst\n- Update tutorial.rst. [Artur Barseghyan]\n\n  Added elasticsearch 2.x setting example.\n- SearchView: always include spelling suggestions. [Josh Goodwin]\n\n  Previously a search which returned no results would not have the\n  \"suggestion\" context variable present. Now it will be defined but None.\n\n  Thanks to Joshua Goodwin (@jclgoodwin) for the patch.\n\n  Closes #644\n- Update changelog. [Chris Adams]\n- Merge pull request #1469 from stephenpaulger/patch-1. [Chris Adams]\n\n  Add 2.6.X docs link to README.\n- Add 2.6.X docs link to README. 
[Stephen Paulger]\n\n\nv2.6.0 (2017-01-04)\n-------------------\n- Update changelog. [Chris Adams]\n- Merge #1460: backend support for Elasticsearch 2.x. [Chris Adams]\n\n  Thanks to João Junior (@joaojunior) and Bruno Marques (@ElSaico) for the\n  patch\n\n  Closes #1460\n  Closes #1391\n  Closes #1336\n  Closes #1247\n- Docs: update Elasticsearch support status. [Chris Adams]\n- Tests: avoid unrelated failures when elasticsearch is not installed.\n  [Chris Adams]\n\n  This avoids spurious failures in tests for other search engines when the\n  elasticsearch client library is not installed at all but the ES backend\n  is still declared in the settings.\n- Tests: friendlier log message for ES version checks. [Chris Adams]\n\n  This avoids a potentially scary-looking ImportError flying by in the\n  test output for what's expected in normal usage.\n- Tests: update ES version detection in settings. [Chris Adams]\n\n  This allows the tests to work when run locally or otherwise outside of\n  our Travis / Tox scripts by obtaining the version from the installed\n  `elasticsearch` client library.\n- Tests: update ES1 client version check message. [Chris Adams]\n\n  The name of the Python module changed over time and this now matches the\n  ES2 codebase behaviour of having the error message give you the exact\n  package to install including the version.\n- Update travis script with ES documentation. [Chris Adams]\n\n  Add a comment for anyone wondering why this isn't a simple\n  `add-apt-repository` call\n- Fixed More Like This test with deferred query on Elasticsearch 2.x.\n  [Bruno Marques]\n- Fixed expected query behaviour on ES2.x test. [Bruno Marques]\n- Install elasticsearch2.0 via apt. [joaojunior]\n- Install elasticsearch2.0 via apt. [joaojunior]\n- Remove typo. [joaojunior]\n- Remove services elasticsearch. [joaojunior]\n- Fix typo. [joaojunior]\n- Sudo=true in .travis.yml to install elasticsearch from apt-get.\n  [joaojunior]\n- Fix .travis. 
[joaojunior]\n- Add logging in __init__ tests elasticsearch. [joaojunior]\n- Get changes from Master to resolve conflicts. [joaojunior]\n- Install elasticsearch1.7 via apt. [joaojunior]\n- Update Files to run tests in Elasticsearch2.x. [joaojunior]\n- Refactoring the code in pull request #1336 . This pull request is to\n  permit use ElasticSearch 2.X. [joaojunior]\n- Improved custom object identifier test. [Chris Adams]\n\n  This provides an example for implementors and ensures that failing to\n  use the custom class would cause a test failure.\n- Update management backend documentation for `--using` [flinkflonk]\n\n  Thanks to @flinkflonk for the patch!\n\n  Closes #1215\n- Fix filtered \"more like this\" queries (#1459) [David Cook]\n\n  Now the Solr backend correctly handles a `more_like_this()` query which is subsequently `filter()`-ed.\n\n  Thanks to @divergentdave for the patch and tests!\n- ReStructuredText link format fixes. (#1458) [John Heasly]\n- Add note to Backend Support docs about lack of ES 5.X support. (#1457)\n  [John Heasly]\n- Replace deprecated Point.get_coords() calls. [Chris Adams]\n\n  This works as far back as Django 1.8, which is the earliest which we\n  support.\n\n  See #1454\n- Use setuptools_scm to manage package version numbers. [Chris Adams]\n\n\nv2.5.1 (2016-10-28)\n-------------------\n\nNew\n~~~\n- Support for Django 1.10. [Chris Adams]\n\n  Thanks to Morgan Aubert (@ellmetha) for the patch\n\n  Closes #1434\n  Closes #1437\n  Closes #1445\n\nFix\n~~~\n- Contains filter, add endswith filter. [Antony]\n\n  * `__contains` now works in a more intuitive manner (the previous behaviour remains the default for `=` shortcut queries and can be requested explicitly with `__content`)\n  * `__endswith` is now supported as the logical counterpart to `__startswith`\n\n  Thanks to @antonyr for the patch and @sebslomski for code review and testing.\n\nOther\n~~~~~\n- V2.5.1. 
[Chris Adams]\n- Add support for Django 1.10 (refs: #1437, #1434) [Morgan Aubert]\n- Docs: fix Sphinx hierarchy issue. [Chris Adams]\n- Fix multiprocessing regression in update_index. [Chris Adams]\n\n  4e1e2e1c5df1ed1c5432b9d26fcb9dc1abab71f4 introduced a bug because it\n  used a property name which exists on haystack.ConnectionHandler but not\n  the Django ConnectionHandler class it's modeled on. Long-term, we should\n  rename the Haystack class to something like `SearchConnectionHandler`\n  to avoid future confusion.\n\n  Closes #1449\n- Doc: cleanup searchindex_api.rst. [Jack Norman]\n\n  Thanks to Jack Norman (@jwnorman) for the patch\n- Merge pull request #1444 from jeremycline/master. [Chris Adams]\n\n  Upgrade setuptools in Travis so urllib3-1.18 installs\n- Upgrade setuptools in Travis so urllib3-1.18 installs. [Jeremy Cline]\n\n  The version of setuptools in Travis is too old to handle <= as an\n  environment marker.\n- Tests: accept Solr/ES config from environment. [Chris Adams]\n\n  This makes it easy to override these values for e.g. running test\n  instances using Docker images with something like this:\n\n  ```\n  TEST_ELASTICSEARCH_1_URL=\"http://$(docker port elasticsearch-1.7 9200/tcp)/\" TEST_SOLR_URL=\"http://$(docker port solr-6 8983/tcp)/solr/\" test_haystack/run_tests.py\n  ```\n\n  See #1408\n- Merge pull request #1418 from Alkalit/master. [Steve Byerly]\n\n  Added link for 2.5.x version docs\n- Added link for 2.5.x version. [Alexey Kalinin]\n- Merge pull request #1432 from farooqaaa/master. [Steve Byerly]\n\n  Added missing `--batch-size` argument for `rebuild_index` management command.\n- Added missing --batch-size argument. [Farooq Azam]\n- Merge pull request #1036 from merwok/patch-1. [Steve Byerly]\n\n  Documentation update\n- Use ellipsis instead of pass. [Éric Araujo]\n- Fix code to enable highlighting. [Éric Araujo]\n- Merge pull request #1392 from browniebroke/bugfix/doc-error. 
[Steve\n  Byerly]\n\n  Fix Sphinx errors in the changelog\n- Fix Sphinx errors in the changelog. [Bruno Alla]\n- Merge pull request #1341 from tymofij/solr-hl-options. [Steve Byerly]\n- Merge master > tymofij/solr-hl-options. [Steve Byerly]\n- Make solr backend accept both shortened and full-form highlighting\n  options. [Tim Babych]\n- Autoprefix 'hl.' for solr options. [Tim Babych]\n- Update gitignore to not track test artifacts. [Steve Byerly]\n- Merge pull request #1413 from tymofij/patch-2. [Steve Byerly]\n\n  typo: suite -> suit\n- Typo: suite -> suit. [Tim Babych]\n- Merge pull request #1412 from SteveByerly/highlight_sqs_docs. [Steve\n  Byerly]\n\n  improve sqs highlight docs - illustrate custom parameters\n- Improve highlight docs for custom options. [Steve Byerly]\n\n\nv2.5.0 (2016-07-12)\n-------------------\n\nNew\n~~~\n- SearchQuerySet.set_spelling_query for custom spellcheck. [Chris Adams]\n\n  This makes it much easier to customize the text sent to the\n  backend search engine for spelling suggestions independently\n  from the actual query being executed.\n- Support ManyToManyFields in model_attr lookups. [Arjen Verstoep]\n\n  Thanks to @Terr for the patch\n- `update_index` will retry after backend failures. [Gilad Beeri]\n\n  Now `update_index` will retry failures multiple times before aborting\n  with a progressive time delay.\n\n  Thanks to Gilad Beeri (@giladbeeri) for the patch\n- `highlight()` accepts custom values on Solr and ES. [Chris Adams]\n\n  This allows the default values to be overriden and arbitrary\n  backend-specific parameters may be provided to Solr or ElasticSearch.\n\n  Thanks to @tymofij for the patch\n\n  Closes #1334\n- Allow Routers to return multiple indexes. [Chris Adams]\n\n  Thanks to Hugo Chargois (@hchargois) for the patch\n\n  Closes #1337\n  Closes #934\n- Support for newer versions of Whoosh. [Chris Adams]\n- Split SearchView.create_response into get_context. 
[Chris Adams]\n\n  This makes it easy to override the default `create_response` behaviour\n  if you don't want a standard HTML response.\n\n  Thanks @seocam for the patch\n\n  Closes #1338\n- Django 1.9 support thanks to Claude Paroz. [Chris Adams]\n- Create a changelog using gitchangelog. [Chris Adams]\n\n  This uses `gitchangelog <https://github.com/vaab/gitchangelog>`_ to\n  generate docs/changelog.rst from our Git commit history using the tags\n  for each version. The configuration is currently tracking upstream\n  exactly except for our version tags being prefixed with \"v\".\n\nChanges\n~~~~~~~\n- Support for Solr 5+ spelling suggestion format. [Chris Adams]\n- Set install requirements for Django versions. [Chris Adams]\n\n  This will prevent accidentally breaking apps when Django 1.10 is\n  released.\n\n  Closes #1375\n- Avoid double-query for queries matching no results. [Chris Adams]\n- Update supported/tested Django versions. [Chris Adams]\n\n  * setup.py install_requires uses `>=1.8` to match our current test\n    matrix\n  * Travis allows failures for Django 1.10 so we can start tracking the\n    upcoming release\n- Make backend subclassing easier. [Chris Adams]\n\n  This change allows the backend build_search_kwargs to\n  accept arbitrary extra arguments, making life easier for authors of `SearchQuery` or `SearchBackend` subclasses when they can directly pass a value which is directly supported by the backend search client.\n- Update_index logging & multiprocessing improvements. [Chris Adams]\n\n  * Since older versions of Python are no longer supported we no\n    longer conditionally import multiprocessing (see #1001)\n  * Use multiprocessing.log_to_stderr for all messages\n  * Remove previously-disabled use of the multiprocessing workers for index removals, allowing the worker code to be simplified\n- Moved signal processor loading to app_config.ready. 
[Chris Adams]\n\n  Thanks to @claudep for the patch\n\n  Closes #1260\n- Handle `__in=[]` gracefully on Solr. [Chris Adams]\n\n  This commit avoids the need to check whether a list is empty to avoid an\n  error when using it for an `__in` filter.\n\n  Closes #358\n  Closes #1311\n\nFix\n~~~\n- Attribute resolution on models which have a property named `all`\n  (#1405) [Henrique Chehad]\n\n  Thanks to Henrique Chehad (@henriquechehad) for the patch\n\n  Closes #1404\n- Tests will fall back to the Apache archive server. [Chris Adams]\n\n  The Apache 4.10.4 release was quietly removed from the mirrors without a\n  redirect. Until we have time to add newer Solr releases to the test\n  suite we'll download from the archive and let the Travis build cache\n  store it.\n- Whoosh backend support for RAM_STORE (closes #1386) [Martin Owens]\n\n  Thanks to @doctormo for the patch\n- Unsafe update_worker multiprocessing sessions. [Chris Adams]\n\n  The `update_index` management command does not handle the\n  `multiprocessing` environment safely. On POSIX systems,\n  `multiprocessing` uses `fork()` which means that when called in a\n  context such as the test suite where the connection has already been\n  used some backends like pysolr or ElasticSearch may have an option\n  socket connected to the search server and that leaves a potential race\n  condition where HTTP requests are interleaved, producing unexpected\n  errors.\n\n  This commit resets the backend connection inside the workers and has\n  been stable across hundreds of runs, unlike the current situation where\n  a single-digit number of runs would almost certainly have at least one\n  failure.\n\n  Other improvements:\n  * Improved sanity checks for indexed documents in management\n    command test suite. 
This wasn’t actually the cause of the\n    problem above but since I wrote it while tracking down the\n    real problem there’s no reason not to use it.\n  * update_index now checks that each block dispatched was\n    executed to catch any possible silent failures.\n\n  Closes #1376\n  See #1001\n- Tests support PyPy. [Chris Adams]\n\n  PyPy has an optimization which causes it to call __len__ when running a\n  list comprehension, which is the same thing Python does for\n  `list(iterable)`. This commit simply changes the test code to always use\n  `list` the PyPy behaviour matches CPython.\n- Avoid an extra query on empty spelling suggestions. [Chris Adams]\n\n  None was being used as a placeholder to test whether to run\n  a spelling suggestion query but was also a possible response\n  when the backend didn’t return a suggestion, which meant\n  that calling `spelling_suggestion()` could run a duplicate\n  query.\n- MultiValueField issues with single value (#1364) [Arjen Verstoep]\n\n  Thanks to @terr for the patch!\n- Queryset slicing and reduced code duplication. [Craig de Stigter]\n\n  Now pagination will not lazy-load all earlier pages before returning the\n  result.\n\n  Thanks to @craigds for the patch\n\n  Closes #1269\n  Closes #960\n- Handle negative timestamps returned from ES. [Chris Adams]\n\n  Elastic search can return negative timestamps for histograms if the\n  dates are pre-1970. This PR properly handles these pre-1970 dates.\n\n  Thanks to @speedplane for the patch\n\n  Closes #1239\n- SearchMixin allows form initial values. [Chris Adams]\n\n  Thanks to @ahoho for the patch\n\n  Closes #1319\n- Graceful handling of empty __in= lists on ElasticSearch. [Chris Adams]\n\n  Thanks to @boulderdave for the ES version of #1311\n\n  Closes #1335\n\nOther\n~~~~~\n- Docs: update unsupported backends notes. [Chris Adams]\n\n  * Officially suggest developing backends as separate projects\n  * Recommend Sphinx users consider django-sphinxql\n- V2.5.0. 
[Chris Adams]\n- Bump version to 2.5.dev2. [Chris Adams]\n- AUTHORS. [Tim Babych]\n- Expand my username into name in changelog.txt. [Tim Babych]\n- Corrected non-ascii characters in comments. (#1390) [Mark Walker]\n- Add lower and upper bounds for django versions. [Simon Hanna]\n- Convert readthedocs link for their .org -> .io migration for hosted\n  projects. [Adam Chainz]\n\n  As per [their blog post of the 27th April](https://blog.readthedocs.com/securing-subdomains/) ‘Securing subdomains’:\n\n  > Starting today, Read the Docs will start hosting projects from subdomains on the domain readthedocs.io, instead of on readthedocs.org. This change addresses some security concerns around site cookies while hosting user generated data on the same domain as our dashboard.\n\n  Test Plan: Manually visited all the links I’ve modified.\n- V2.5.dev1. [Chris Adams]\n- Merge pull request #1349 from sbussetti/master. [Chris Adams]\n\n  Fix logging call in `update_index`\n- Fixes improper call to logger in mgmt command. [sbussetti]\n- Merge pull request #1340 from claudep/manage_commands. [Chris Adams]\n\n  chg: migrate management commands to argparse\n- Updated management commands from optparse to argparse. [Claude Paroz]\n\n  This follows Django's same move and prevents deprecation warnings.\n  Thanks Mario César for the initial patch.\n- Merge pull request #1225 from gregplaysguitar/patch-1. [Chris Adams]\n\n  fix: correct docstring for ModelSearchForm.get_models !minor\n- Fix bogus docstring. [Greg Brown]\n- Merge pull request #1328 from claudep/travis19. [Chris Adams]\n\n  Updated test configs to include Django 1.9\n- Updated test configs to include Django 1.9. [Claude Paroz]\n- Merge pull request #1313 from chrisbrooke/Fix-elasticsearch-2.0-meta-\n  data-changes. [Chris Adams]\n- Remove boost which is now unsupported. [Chris Brooke]\n- Fix concurrency issues when building UnifiedIndex. 
[Chris Adams]\n\n  We were getting this error a lot when under load in a multithreaded wsgi\n  environment:\n\n      Model '%s' has more than one 'SearchIndex`` handling it.\n\n  Turns out the connections in haystack.connections and the UnifiedIndex\n  instance were stored globally. However there is a race condition in\n  UnifiedIndex.build() when multiple threads both build() at once,\n  resulting in the above error.\n\n  Best fix is to never share the same engine or UnifiedIndex across\n  multiple threads. This commit does that.\n\n  Closes #959\n  Closes #615\n- Load connection routers lazily. [Chris Adams]\n\n  Thanks to Tadas Dailyda (@skirsdeda) for the patch\n\n  Closes #1034\n  Closes #1296\n- DateField/DateTimeField accept strings values. [Chris Adams]\n\n  Now the convert method will be called by default when string values are\n  received instead of the normal date/datetime values.\n\n  Closes #1188\n- Fix doc ReST warning. [Chris Adams]\n- Merge pull request #1297 from martinsvoboda/patch-1. [Sam Peka]\n\n  Highlight elasticsearch 2.X is not supported yet\n- Highlight in docs that elasticsearch 2.x is not supported yet. [Martin\n  Svoboda]\n- Start updating compatibility notes. [Chris Adams]\n\n  * Deprecate versions of Django which are no longer\n    supported by the Django project team\n  * Update ElasticSearch compatibility messages\n  * Update Travis / Tox support matrix\n- Merge pull request #1287 from ses4j/patch-1. [Sam Peka]\n\n  Remove duplicated SITE_ID from test_haystack/settings.py\n- Remove redundant SITE_ID which was duplicated twice. [Scott Stafford]\n- Add ``fuzzy`` operator to SearchQuerySet. [Chris Adams]\n\n  This exposes the backends’ native fuzzy query support.\n\n  Thanks to Ana Carolina (@anacarolinats) and Steve Bussetti (@sbussetti)\n  for the patch.\n- Merge pull request #1281 from itbabu/python35. [Justin Caratzas]\n\n  Add python 3.5 to tests\n- Add python 3.5 to tests. 
[Marco Badan]\n\n  ref: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django\n- SearchQuerySet: don’t trigger backend access in __repr__ [Chris Adams]\n\n  This can lead to confusing errors or performance issues by\n  triggering backend access at unexpected locations such as\n  logging.\n\n  Closes #1278\n- Merge pull request #1276 from mariocesar/patch-1. [Chris Adams]\n\n  Use compatible get_model util to support new django versions\n\n  Thanks to @mariocesar for the patch!\n- Reuse haystack custom get model method. [Mario César Señoranis Ayala]\n- Removed unused import. [Mario César Señoranis Ayala]\n- Use compatible get_model util to support new django versions. [Mario\n  César Señoranis Ayala]\n- Merge pull request #1263 from dkarchmer/patch-1. [Chris Adams]\n\n  Update views_and_forms.rst\n- Update views_and_forms.rst. [David Karchmer]\n\n  After breaking my head for an hour, I realized the instructions to upgrade to class based views is incorrect. It should indicate that switch from `page` to `page_obj` and not `page_object`\n\n\nv2.3.2 (2015-11-11)\n-------------------\n- V2.3.2 maintenance update. [Chris Adams]\n- Fix #1253. [choco]\n- V2.3.2 pre-release version bump. [Chris Adams]\n- Allow individual records to be skipped while indexing. [Chris Adams]\n\n  Previously there was no easy way to skip specific objects other than\n  filtering the queryset. This change allows a prepare method to raise\n  `SkipDocument` after calling methods or making other checks which cannot\n  easily be expressed as database filters.\n\n  Thanks to Felipe Prenholato (@chronossc) for the patch\n\n  Closes #380\n  Closes #1191\n\n\nv2.4.1 (2015-10-29)\n-------------------\n- V2.4.1. [Chris Adams]\n- Minimal changes to the example project to allow test use. 
[Chris\n  Adams]\n- Merge remote-tracking branch 'django-haystack/pr/1261' [Chris Adams]\n\n  The commit in #1252 / #1251 was based on the assumption that the\n  tutorial used the new generic views, which is not yet correct.\n\n  This closes #1261 by restoring the wording and adding some tests to\n  avoid regressions in the future before the tutorial is overhauled.\n- Rename 'page_obj' with 'page' in the tutorial, section Search Template\n  as there is no 'page_obj' in the controller and this results giving\n  'No results found' in the search. [bboneva]\n- Style cleanup. [Chris Adams]\n\n  * Remove duplicate & unused imports\n  * PEP-8 indentation & whitespace\n  * Use `foo not in bar` instead of `not foo in bar`\n- Update backend logging style. [Chris Adams]\n\n  * Make Whoosh message consistent with the other backends\n  * Pass exception info to loggers in except: blocks\n  * PEP-8\n- Avoid unsafe default value on backend clear() methods. [Chris Adams]\n\n  Having a mutable structure like a list as a default value is unsafe;\n  this commit changes that to the standard None.\n- Merge pull request #1254 from chocobn69/master. [Chris Adams]\n\n  Update for API change in elasticsearch 1.8 (closes #1253)\n\n  Thanks to @chocobn69 for the patch\n- Fix #1253. [choco]\n- Tests: update Solr launcher for changed mirror format. [Chris Adams]\n\n  The Apache mirror-detection script appears to have changed its response\n  format recently. This change handles that and makes future error\n  messages more explanatory.\n- Bump doc version numbers - closes #1105. [Chris Adams]\n- Merge pull request #1252 from rhemzo/master. [Chris Adams]\n\n  Update tutorial.rst (closes #1251)\n\n  Thanks to @rhemzo for the patch\n- Update tutorial.rst. [rhemzo]\n\n  change page for page_obj\n- Merge pull request #1240 from speedplane/improve-cache-fill. [Chris\n  Adams]\n\n  Use a faster implementation of query result cache\n- Use a faster implementation of this horrible cache. 
In my tests it\n  runs much faster and uses far less memory. [speedplane]\n- Merge pull request #1149 from lovmat/master. [Chris Adams]\n\n  FacetedSearchMixin bugfixes and improvements\n\n  * Updated documentation & example code\n  * Fixed inheritance chain\n  * Added facet_fields\n\n  Thanks to @lovmat for the patch\n- Updated documentation, facet_fields attribute. [lovmat]\n- Added facet_fields attribute. [lovmat]\n\n  Makes it easy to include facets into FacetedSearchVIew\n- Bugfixes. [lovmat]\n- Merge pull request #1232 from dlo/patch-1. [Chris Adams]\n\n  Rename elasticsearch-py to elasticsearch in docs\n\n  Thanks to @dlo for the patch\n- Rename elasticsearch-py to elasticsearch in docs. [Dan Loewenherz]\n- Update wording in SearchIndex get_model exception. [Chris Adams]\n\n  Thanks to Greg Brown (@gregplaysguitar) for the patch\n\n  Closes #1223\n- Corrected exception wording. [Greg Brown]\n- Allow failures on Python 2.6. [Chris Adams]\n\n  Some of our test dependencies like Mock no longer support it. Pinning\n  Mock==1.0.1 on Python 2.6 should avoid that failure but the days of\n  Python 2.6 are clearly numbered.\n- Travis: stop testing unsupported versions of Django on Python 2.6.\n  [Chris Adams]\n- Use Travis’ matrix support rather than tox. [Chris Adams]\n\n  This avoids a layer of build setup and makes the Travis\n  console reports more useful\n- Tests: update the test version of Solr in use. [Chris Adams]\n\n  4.7.2 has disappeared from most of the Apache mirrors\n\n\nv2.4.0 (2015-06-09)\n-------------------\n- Release 2.4.0. [Chris Adams]\n- Merge pull request #1208 from ShawnMilo/patch-1. [Chris Adams]\n\n  Fix a typo in the faceting docs\n- Possible typo fix. [Shawn Milochik]\n\n  It seems that this was meant to be results.\n- 2.4.0 release candidate 2. [Chris Adams]\n- Fix Django 1.9 deprecation warnings. 
[Ilan Steemers]\n\n  * replaced get_model with haystack_get_model which returns the right function depending on the Django version\n  * get_haystack_models is now compliant with > Django 1.7\n\n  Closes #1206\n- Documentation: update minimum versions of Django, Python. [Chris\n  Adams]\n- V2.4.0 release candidate. [Chris Adams]\n- Bump version to 2.4.0.dev1. [Chris Adams]\n- Travis: remove Django 1.8 from allow_failures. [Chris Adams]\n- Tests: update test object creation for Django 1.8. [Chris Adams]\n\n  Several of the field tests previously assigned a related test model\n  instance before saving it::\n\n      mock_tag = MockTag(name='primary')\n      mock = MockModel()\n      mock.tag = mock_tag\n\n  Django 1.8 now validates this dodgy practice and throws an error.\n\n  This commit simply changes it to use `create()` so the mock_tag will\n  have a pk before assignment.\n- Update AUTHORS. [Chris Adams]\n- Tests: fix deprecated Manager.get_query_set call. [Chris Adams]\n- Updating haystack to test against django 1.8. [Chris Adams]\n\n  Updated version of @troygrosfield's patch updating the test-runner for\n  Django 1.8\n\n  Closes #1175\n- Travis: allow Django 1.8 failures until officially supported. [Chris\n  Adams]\n\n  See #1175\n- Remove support for Django 1.5, add 1.8 to tox/travis. [Chris Adams]\n\n  The Django project does not support 1.5 any more and it's the source of\n  most of our false-positive test failures\n- Use db.close_old_connections instead of close_connection. [Chris\n  Adams]\n\n  Django 1.8 removed the `db.close_connection` method.\n\n  Thanks to Alfredo Armanini (@phingage) for the patch\n- Fix mistake in calling super TestCase method. [Ben Spaulding]\n\n  Oddly this caused no issue on Django <= 1.7, but it causes numerous\n  errors on Django 1.8.\n- Correct unittest imports from commit e37c1f3. [Ben Spaulding]\n- Prefer stdlib unittest over Django's unittest2. 
[Ben Spaulding]\n\n  There is no need to fallback to importing unittest2 because Django 1.5\n  is the oldest Django we support, so django.utils.unittest is guaranteed\n  to exist.\n- Prefer stdlib OrderedDict over Django's SortedDict. [Ben Spaulding]\n\n  The two are not exactly they same, but they are equivalent for\n  Haystack's needs.\n- Prefer stdlib importlib over Django's included version. [Ben\n  Spaulding]\n\n  The app_loading module had to shuffle things a bit. When it was\n  importing the function it raised a [RuntimeError][]. Simply importing\n  the module resolved that.\n\n  [RuntimeError]: https://gist.github.com/benspaulding/f36eaf483573f8e5f777\n- Docs: explain how field boosting interacts with filter. [Chris Adams]\n\n  Thanks to @amjoconn for contributing a doc update to help newcomers\n\n  Closes #1043\n- Add tests for values/values_list slicing. [Chris Adams]\n\n  This confirms that #1019 is fixed\n- Update_index: avoid gaps in removal logic. [Chris Adams]\n\n  The original logic did not account for the way removing records\n  interfered with the pagination logic.\n\n  Closes #1194\n- Update_index: don't use workers to remove stale records. [Chris Adams]\n\n  There was only minimal gain to this because, unlike indexing, removal is\n  a simple bulk operation limited by the search engine.\n\n  See #1194\n  See #1201\n- Remove lxml dependency. [Chris Adams]\n\n  pysolr 3.3.2+ no longer requires lxml, which saves a significant install\n  dependency\n- Allow individual records to be skipped while indexing. [Chris Adams]\n\n  Previously there was no easy way to skip specific objects other than\n  filtering the queryset. This change allows a prepare method to raise\n  `SkipDocument` after calling methods or making other checks which cannot\n  easily be expressed as database filters.\n\n  Thanks to Felipe Prenholato (@chronossc) for the patch\n\n  Closes #380\n  Closes #1191\n- Update_index: avoid \"MySQL has gone away error\" with workers. 
[Eric\n  Bressler (Platform)]\n\n  This fixes an issue with a stale database connection being passed to\n  a multiprocessing worker when using `--remove`\n\n  Thanks to @ebressler for the patch\n\n  Closes #1201\n- Depend on pysolr 3.3.1. [Chris Adams]\n- Start-solr-test-server: avoid Travis dependency. [Chris Adams]\n\n  This will now fall back to the current directory when run outside of our Travis-CI environment\n- Fix update_index --remove handling. [Chris Adams]\n\n  * Fix support for custom keys by reusing the stored value rather than\n    regenerating following the default pattern\n  * Batch remove operations using the total number of records\n    in the search index rather than the database\n\n  Closes #1185\n  Closes #1186\n  Closes #1187\n- Merge pull request #1177 from paulshannon/patch-1. [Chris Adams]\n\n  Update TravisCI link in README\n- Update TravisCI link. [Paul Shannon]\n\n  I think the repo got changed at some point and the old project referenced at travisci doesn't exist anymore...\n- Travis: enable containers. [Chris Adams]\n\n  * Move apt-get installs to the addons/apt_packages:\n    http://docs.travis-ci.com/user/apt-packages/\n  * Set `sudo: false` to enable containers:\n    http://docs.travis-ci.com/user/workers/container-based-infrastructure/\n- Docs: correct stray GeoDjango doc link. [Chris Adams]\n- Document: remove obsolete Whoosh Python 3 warning. [Chris Adams]\n\n  Thanks to @gitaarik for the pull request\n\n  Closes #1154\n  Fixes #1108\n- Remove method_decorator backport (closes #1155) [Chris Adams]\n\n  This was no longer used anywhere in the Haystack source or documentation\n- Travis: enable APT caching. [Chris Adams]\n- Travis: update download caching. [Chris Adams]\n- App_loading cleanup. 
[Chris Adams]\n\n  * Add support for Django 1.7+ AppConfig\n  * Rename internal app_loading functions to have haystack_ prefix to make\n    it immediately obvious that they are not Django utilities and start\n  * Add tests to avoid regressions for apps nested with multiple levels of\n    module hierarchy like `raven.contrib.django.raven_compat`\n  * Refactor app_loading logic to make it easier to remove the legacy\n    compatibility code when we eventually drop support for older versions\n    of Django\n\n  Fixes #1125\n  Fixes #1150\n  Fixes #1152\n  Closes #1153\n- Switch defaults closer to Python 3 defaults. [Chris Adams]\n\n  * Add __future__ imports:\n\n  isort --add_import 'from __future__ import absolute_import, division, print_function, unicode_literals'\n\n  * Add source encoding declaration header\n- Setup.py: use strict PEP-440 dev version. [Chris Adams]\n\n  The previous version was valid as per PEP-440 but triggers a warning in\n  pkg_resources\n- Merge pull request #1146 from kamilmowinski/patch-1. [Chris Adams]\n\n  Fix typo in SearchResult documentation\n- Update searchresult_api.rst. [kamilmowinski]\n- Merge pull request #1143 from wicol/master. [Chris Adams]\n\n  Fix deprecation warnings in Django 1.6.X (thanks @wicol)\n- Fix deprecation warnings in Django 1.6.X. [Wictor]\n\n  Options.model_name was introduced in Django 1.6 together with a deprecation warning:\n  https://github.com/django/django/commit/ec469ade2b04b94bfeb59fb0fc7d9300470be615\n- Travis: move tox setup to before_script. [Chris Adams]\n\n  This should cause dependency installation problems to show up as build\n  errors rather than outright failures\n- Update ElasticSearch defaults to allow autocompleting numbers. 
[Chris\n  Adams]\n\n  Previously the defaults for ElasticSearch used the `lowercase`\n  tokenizer, which prevented numbers from being autocompleted.\n\n  Thanks to Phill Tornroth (@phill-tornroth) for contributing a patch\n  which changes the default settings to use the `standard` tokenizer\n  with the `lowercase` filter\n\n  Closes #1056\n- Update documentation for new class-based views. [Chris Adams]\n\n  Thanks to @troygrosfield for the pull-request\n\n  Closes #1139\n  Closes #1133\n  See #1130\n- Added documentation for configuring facet behaviour. [Chris Adams]\n\n  Thanks to Philippe Luickx for the contribution\n\n  Closes #1111\n- UnifiedIndex has a stable interface to get all indexes. [Chris Adams]\n\n  Previously it was possible for UnifiedIndexes.indexes to be empty when\n  called before the list had been populated. This change deprecates\n  accessing `.indexes` directly in favor of a `get_indexes()` accessor\n  which will call `self.build()` first if necessary.\n\n  Thanks to Phill Tornroth for the patch and tests.\n\n  Closes #851\n- Add support for SQ in SearchQuerySet.narrow() (closes #980) [Chris\n  Adams]\n\n  Thanks to Andrei Fokau (@andreif) for the patch and tests\n- Disable multiprocessing on Python 2.6 (see #1001) [Chris Adams]\n\n  multiprocessing.Pool.join() hangs reliably on Python 2.6 but\n  not any later version tested. Since this is an optional\n  feature we’ll simply disable it\n- Bump version number to 2.4.0-dev. [Chris Adams]\n- Update_index: wait for all pool workers to finish. [Chris Adams]\n\n  There was a race condition where update_index() would return\n  before all of the workers had finished updating Solr. 
This\n  manifested itself most frequently as Travis failures\n  for the multiprocessing test (see #1001).\n- Tests: Fix ElasticSearch index setup (see #1093) [Chris Adams]\n\n  Previously when clear_elasticsearch_index() was called to\n  reset the tests, this could produce confusing results\n  because it cleared the mappings without resetting the\n  backend’s setup_complete status and thus fields which were\n  expected to have a specific type would end up being inferred\n\n  With this changed test_regression_proper_start_offsets and\n  test_more_like_this no longer fail\n- Update rebuild_index --nocommit handling and add tests. [Chris Adams]\n\n  rebuild_index builds its option list by combining the options from\n  clear_index and update_index. This previously had a manual exclude list\n  for options which were present in both commands to avoid conflicts but\n  the nocommit option wasn't in that list.\n\n  This wasn't tested because our test suite uses call_command rather than\n  invoking the option parser directly.\n\n  This commit also adds tests to confirm that --nocommit will actually\n  pass commit=False to clear_index and update_index.\n\n  Closes #1140\n  See #1090\n- Support ElasticSearch 1.x distance filter syntax (closes #1003) [Chris\n  Adams]\n\n  The elasticsearch 1.0 release was backwards incompatible\n  with our previous usage.\n\n  Thanks to @dulaccc for the patch adding support.\n- Docs: add Github style guide link to pull request instructions. [Chris\n  Adams]\n\n  The recent Github blog post makes a number of good points:\n\n  https://github.com/blog/1943-how-to-write-the-perfect-pull-request\n- Fixed exception message when resolving model_attr. 
[Wictor]\n\n  This fixes the error message displayed when model_attr references an\n  unknown attribute.\n\n  Thanks to @wicol for the patch\n\n  Closes #1094\n- Compatibility with Django 1.7 app loader (see #1097) [Chris Adams]\n\n  * Added wrapper around get_model, so that Django 1.7 uses the new app\n    loading mechanism.\n  * Added extra model check to prevent that a simple module is treated as\n    model.\n\n  Thanks to Dirk Eschler (@deschler) for the patch.\n- Fix index_fieldname to match documentation (closes #825) [Chris Adams]\n\n  @jarig contributed a fix to ensure that index_fieldname renaming does\n  not interfere with using the field name declared on the index.\n- Add tests for Solr/ES spatial order_by. [Chris Adams]\n\n  This exists primarily to avoid the possibility of breaking\n  compatibility with the inconsistent lat, lon ordering used\n  by Django, Solr and ElasticSearch.\n- Remove undocumented `order_by_distance` [Chris Adams]\n\n  This path was an undocumented artifact of the original\n  geospatial feature-branch back in the 1.X era. It wasn’t\n  documented and is completely covered by the documented API.\n- ElasticSearch tests: PEP-8 cleanup. [Chris Adams]\n- Implement managers tests for spatial features. [Chris Adams]\n\n  This is largely shadowed by the actual spatial tests but it\n  avoids surprises on the query generation\n\n  * Minor PEP-8\n- Remove unreferenced add_spatial methods. [Chris Adams]\n\n  SolrSearchQuery and ElasticsearchSearchQuery both defined\n  an `add_spatial` method which was neither called nor\n  documented.\n- Remove legacy httplib/httplib2 references. [Chris Adams]\n\n  We’ve actually delegated the actual work to requests but the\n  docs & tests had stale references\n- Tests: remove legacy spatial backend code. [Chris Adams]\n\n  This has never run since the solr_native_distance backend\n  did not exist and thus the check always failed silently\n- ElasticSearch backend: minor PEP-8 cleanup. 
[Chris Adams]\n- Get-solr-download-url: fix Python 3 import path. [Chris Adams]\n\n  This allows the scripts to run on systems where Python 3 is\n  the default version\n- Merge pull request #1130 from troygrosfield/master. [Chris Adams]\n\n  Added generic class based search views\n\n  (thanks @troygrosfield)\n- Removed \"expectedFailure\". [Troy Grosfield]\n- Minor update. [Troy Grosfield]\n- Added tests for the generic search view. [Troy Grosfield]\n- Hopefully last fix for django version checking. [Troy Grosfield]\n- Fix for django version check. [Troy Grosfield]\n- Adding fix for previously test for django 1.7. [Troy Grosfield]\n- Adding py34-django1.7 to travis. [Troy Grosfield]\n- Test for the elasticsearch client. [Troy Grosfield]\n- Added unicode_literals import for py 2/3 compat. [Troy Grosfield]\n- Added generic class based search views. [Troy Grosfield]\n- Merge pull request #1101 from iElectric/nothandledclass. [Chris Adams]\n\n  Report correct class when raising NotHandled\n- Report correct class when raising NotHandled. [Domen Kožar]\n- Merge pull request #1090 from andrewschoen/feature/no-commit-flag.\n  [Chris Adams]\n\n  Adds a --nocommit arg to the update_index, clear_index and rebuild_index management command.\n- Adds a --nocommit arg to the update_index, clear_index and\n  rebuild_index management commands. [Andrew Schoen]\n- Merge pull request #1103 from pkafei/master. [Chris Adams]\n\n  Update documentation to reference Solr 4.x\n- Changed link to official archive site. [Portia Burton]\n- Added path to schema.xml. [Portia Burton]\n- Added latest version of Solr to documentation example. [Portia Burton]\n- Update ElasticSearch version requirements. [Chris Adams]\n- Elasticsearch's python api by default has _source set to False, this\n  causes keyerror mentioned in bug #1019. [xsamurai]\n- Solr: clear() won’t call optimize when commit=False. 
[Chris Adams]\n\n  An optimize will trigger a commit implicitly so we’ll avoid\n  calling it when the user has requested not to commit\n- Bumped __version__ (closes #1112) [Dan Watson]\n- Travis: allow PyPy builds to fail. [Chris Adams]\n\n  This is currently unstable and it's not a first-class supported platform\n  yet\n- Tests: fix Solr server tarball test. [Chris Adams]\n\n  On a clean Travis instance, the tarball won't exist\n- Tests: have Solr test server startup script purge corrupt tarballs.\n  [Chris Adams]\n\n  This avoids tests failing if a partial download is cached by Travis\n- Merge pull request #1084 from streeter/admin-mixin. [Daniel Lindsley]\n\n  Document and add an admin mixin\n- Document support for searching in the Django admin. [Chris Streeter]\n- Add some spacing. [Chris Streeter]\n- Create an admin mixin for external use. [Chris Streeter]\n\n  There are cases where one might have a different base admin class, and\n  wants to use the search features in the admin as well. Creating a mixin\n  makes this a bit cleaner.\n\n\nv2.3.1 (2014-09-22)\n-------------------\n- V2.3.1. [Chris Adams]\n- Tolerate non-importable apps like django-debug-toolbar. [Chris Adams]\n\n  If your installed app isn't even a valid Python module, haystack will\n  issue a warning but continue.\n\n  Thanks to @gojomo for the patch\n\n  Closes #1074\n  Closes #1075\n- Allow apps without models.py on Django <1.7. [Chris Adams]\n\n  This wasn't officially supported by Django prior to 1.7 but is used by\n  some third-party apps such as Grappelli\n\n  This commit adds a somewhat contrived test app to avoid future\n  regressions by ensuring that the test suite always has an application\n  installed which does not have models.py\n\n  See #1073\n\n\nv2.3.0 (2014-09-19)\n-------------------\n- Travis: Enable IRC notifications. [Chris Adams]\n- Fix app loading call signature. 
[Chris Adams]\n\n  Updated code from #1016 to ensure that get_models always\n  returns a list (previously on Django 1.7 it would return\n  the bare model when called with an argument of the form\n  `app.modelname`)\n\n  Add some basic tests\n- App loading: use ImproperlyConfigured for bogus app names. [Chris\n  Adams]\n\n  This never worked but we’ll be more consistent and return\n  ImproperlyConfigured instead of a generic LookupError\n- App Loading: don’t suppress app-registry related exceptions. [Chris\n  Adams]\n\n  This is just asking for trouble in the future. If someone comes up with\n  an edge case, we should add a test for it\n- Remove Django version pin from install_requires. [Chris Adams]\n- Django 1.7 support for app discovery. [Chris Adams]\n\n  * Refactored @Xaroth’s patch from #1015 into a separate utils\n    module\n  * PEP-8 cleanup\n- Start the process of updating for v2.3 release. [Chris Adams]\n- Django 1.7 compatibility for model loading. [Chris Adams]\n\n  This refactors the previous use of model _meta.module_name and updates\n  the tests so the previous change can be tested safely.\n\n  Closes #981\n  Closes #982\n- Update tox Django version pins. [Chris Adams]\n- Mark expected failures for Django 1.7 (see #1069) [Chris Adams]\n- Django 1.7: ensure that the app registry is ready before tests are\n  loaded. [Chris Adams]\n\n  The remaining test failures are due to some of the oddities in model\n  mocking, which can be solved by overhauling the way we do tests and\n  mocks.\n- Tests: Whoosh test overhaul. [Chris Adams]\n\n  * Move repetitive filesystem reset logic into WhooshTestCase which\n    cleans up after itself\n  * Use mkdtemp instead of littering up the current directory with a\n    'tmp' subdirectory\n  * Use skipIf rather than expectFailure on test_writable to disable\n    it only when STORAGE=ram rather than always\n- Unpin elasticsearch library version for testing. [Chris Adams]\n- Tests: add MIDDLEWARE_CLASSES for Django 1.7. 
[Chris Adams]\n- Use get_model_ct_tuple to generate template name. [Chris Adams]\n- Refactor simple_backend to use get_model_ct_tuple. [Chris Adams]\n- Haystack admin: refactor to use get_model_ct_tuple. [Chris Adams]\n- Consolidate model meta references to use get_model_ct (see #981)\n  [Chris Adams]\n\n  This use of a semi-public Django interface will break in Django 1.7\n  and we can start preparing by using the existing\n  haystack.utils.get_model_ct function instead of directly accessing\n  it everywhere.\n- Refactor get_model_ct to handle Django 1.7, add tuple version. [Chris\n  Adams]\n\n  We have a mix of model _meta access which usually expects strings but in\n  a few places needs raw values. This change adds support for Django 1.7\n  (see https://code.djangoproject.com/ticket/19689) and allows raw tuple\n  access to handle other needs in the codebase\n- Add Django 1.7 warning to Sphinx docs as well. [Chris Adams]\n\n\nv2.2.1 (2014-09-03)\n-------------------\n- Mark 2.2.X as incompatible with Django 1.7. [Chris Adams]\n- Tests: don't suppress Solr stderr logging. [Chris Adams]\n\n  This will make easier to tell why Solr sometimes goes away on Travis\n- Update Travis & Tox config. [Chris Adams]\n\n  * Tox: wait for Solr to start before running tests\n  * Travis: allow solr & pip downloads to be cached\n  * Travis now uses start-solr-test-server.sh instead of travis-solr\n  * Test Solr configuration uses port 9001 universally as per the\n    documentation\n  * Change start-solr-test-server.sh to change into its containing\n    directory, which also allows us to remove the realpath dependency\n  * Test Solr invocation matches pysolr\n      * Use get-solr-download-url script to pick a faster mirror\n      * Upgrade to Solr 4.7.2\n- Travis, Tox: add Django 1.7 targets. [Chris Adams]\n- Merge pull request #1055 from andreif/feature/realpath-fallback-osx.\n  [Chris Adams]\n- Fallback to pwd if realpath is not available. 
[Andrei Fokau]\n- Merge pull request #1053 from gandalfar/patch-1. [Chris Adams]\n- Update example for Faceting to reference page.object_list. [Jure\n  Cuhalev]\n\n  Instead of `results` - ref #1052\n- Add PyPy targets to Tox & Travis. [Chris Adams]\n\n  Closes #1049\n- Merge pull request #1044 from areski/patch-1. [Chris Adams]\n\n  Update Xapian install instructions (thanks @areski)\n- Update Xapian install. [Areski Belaid]\n- Docs: fix signal processors link in searchindex_api. [Chris Adams]\n\n  Correct a typo in b676b17dbc4b29275a019417e7f19f531740f05e\n- Merge pull request #1050 from jogwen/patch-2. [Chris Adams]\n- Link to 'signal processors' [Joanna Paulger]\n- Merge pull request #1047 from g3rd/patch-1. [Chris Adams]\n\n  Update the installing search engine documentation URL (thanks @g3rd)\n- Fixed the installing search engine doc URL. [Chad Shrock]\n- Merge pull request #1025 from reinout/patch-1. [Chris Adams]\n\n  Fixed typo in templatetag docs example (thanks to @reinout)\n- Fixed typo in example. [Reinout van Rees]\n\n  It should be `css_class` in the template tag example instead of just `class`. (It is mentioned correctly in the syntax line earlier).\n\n\nv2.2.0 (2014-08-03)\n-------------------\n- Release v2.2.0. [Chris Adams]\n- Test refactor - merge all the tests into one test suite (closes #951)\n  [Chris Adams]\n\n  Major refactor by @honzakral which stabilized the test suite, makes it easier to run and add new tests and\n  somewhat faster, too.\n\n  * Merged all the tests\n  * Mark tests as skipped when a backend is not available (e.g. 
no ElasticSearch or Solr connection)\n  * Massively simplified test runner (``python setup.py test``)\n\n  Minor updates:\n  * Travis:\n      - Test Python 3.4\n      - Use Solr 4.6.1\n  * Simplified legacy test code which can now be replaced by the test utilities in newer versions of Django\n  * Update ElasticSearch client & tests for ES 1.0+\n  * Add option for SearchModelAdmin to specify the haystack connection to use\n  * Fixed a bug with RelatedSearchQuerySet caching using multiple instances (429d234)\n- RelatedSearchQuerySet: move class globals to instance properties.\n  [Chris Adams]\n\n  This caused obvious failures in the test suite and presumably\n  elsewhere when multiple RelatedSearchQuerySet instances were in use\n- Merge pull request #1032 from maikhoepfel/patch-1. [Justin Caratzas]\n\n  Drop unused variable when post-processing results\n- Drop unused variable when post-processing results. [Maik Hoepfel]\n\n  original_results is not used in either method, and can be safely removed.\n- 404 when initially retrieving mappings is ok. [Honza Král]\n- Ignore 400 (index already exists) when creating an index in\n  Elasticsearch. [Honza Král]\n- ElasticSearch: update clear() for 1.x+ syntax. [Chris Adams]\n\n  As per http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.x/docs-delete-by-query.html this should be nested inside a\n  top-level query block:\n\n  {“query”: {“query_string”: …}}\n- Add setup.cfg for common linters. [Chris Adams]\n- ElasticSearch: avoid KeyError for empty spelling. [Chris Adams]\n\n  It was possible to get a KeyError when spelling suggestions were\n  requested but no suggestions are returned by the backend.\n\n  Thanks to Steven Skoczen (@skoczen) for the patch\n- Merge pull request #970 from tobych/patch-3. [Justin Caratzas]\n\n  Improve punctuation in super-scary YMMV warning\n- Improve punctuation in super-scary YMMV warning. [Toby Champion]\n- Merge pull request #969 from tobych/patch-2. 
[Justin Caratzas]\n\n  Fix typo; clarify purpose of search template\n- Fix typo; clarify purpose of search template. [Toby Champion]\n- Merge pull request #968 from tobych/patch-1. [Justin Caratzas]\n\n  Fix possessive \"its\" in tutorial.rst\n- Fix possessive \"its\" [Toby Champion]\n- Merge pull request #938 from Mbosco/patch-1. [Daniel Lindsley]\n\n  Update tutorial.rst\n- Update tutorial.rst. [BoscoMW]\n- Fix logging call in SQS post_process_results (see #648) [Chris Adams]\n\n  This was used in an except: handler and would only be executed when a\n  load_all() queryset retrieved a model which wasn't registered with the\n  index.\n- Merge pull request #946 from gkaplan/spatial-docs-fix. [Daniel\n  Lindsley]\n\n  Small docs fix for spatial search example code\n- Fix typo with instantiating Distance units. [Graham Kaplan]\n- Solr backend: correct usage of pysolr delete. [Chris Adams]\n\n  We use HAYSTACK_ID_FIELD in other places but the value passed to\n  pysolr's delete() method must use the keyword argument ``id``:\n\n  https://github.com/toastdriven/pysolr/blob/v3.1.0/pysolr.py#L756\n\n  Although the value is passed to Solr an XML tag named ``<id>`` it will\n  always be checked against the actual ``uniqueKey`` field even if it uses\n  a custom name:\n\n  https://wiki.apache.org/solr/UpdateXmlMessages#A.22delete.22_documents_by_ID_and_by_Query\n\n  Closes #943\n- Add a note on elasticsearch-py versioning with regards to 1.0. [Honza\n  Král]\n- Ignore 404 when removing a document from elasticsearch. [Honza Král]\n\n  Fixes #942\n- Ignore missing index during .clear() [Honza Král]\n\n  404 in indices.delete can only mean that the index is there, no issue\n  for a delete operation\n\n  Fixes #647\n- Tests: remove legacy targets. [Chris Adams]\n\n  * Django 1.4 is no longer supported as per the documentation\n  * Travis: use Python 3.3 targets instead of 3.2\n- Tests: update pysolr requirement to 3.1.1. 
[Chris Adams]\n\n  3.1.1 shipped a fix for a change in the Solr response format for the\n  content extraction handler\n- Merge pull request #888 from acdha/888-solr-field-list-regression.\n  [Chris Adams]\n\n  Solr / ElasticSearch backends: restore run() kwargs handling\n\n  This fixes an earlier regression which did not break functionality but made `.values()` and `.values_list()` much less of an optimization than intended.\n\n  #925 will be a more comprehensive refactor but this is enough of a performance win to be worth including if a point release happens before #925 lands.\n- ElasticSearch backend: run() kwargs are passed directly to search\n  backend. [Chris Adams]\n\n  This allows customization by subclasses and also fixes #888\n  by ensuring that the custom field list prepared by\n  `ValuesQuerySet` and `ValuesListQuerySet` is actually used.\n- Solr backend: run() kwargs are passed directly to search backend.\n  [Chris Adams]\n\n  This allows customization by subclasses and also fixes #888\n  by ensuring that the custom field list prepared by\n  `ValuesQuerySet` and `ValuesListQuerySet` is actually used.\n- Tests: skip Solr content extraction with old PySolr. [Chris Adams]\n\n  Until pysolr 3.1.1 ships there's no point in running the Solr content\n  extraction tests because they'll fail:\n\n  https://github.com/toastdriven/pysolr/pull/104\n- Make sure DJANGO_CT and DJANGO_ID fields are not analyzed. [Honza\n  Král]\n- No need to store fields separately in elasticsearch. [Honza Král]\n\n  That will just lead to fields being stored once - as part of _source as\n  well as in separate index that would never be used by haystack (would be\n  used only in special cases when requesting just that field, which can\n  be, with minimal overhead, still just extracted from the _source as it\n  is).\n- Remove extra code. [Honza Král]\n- Simplify mappings for elasticsearch fields. 
[Honza Král]\n\n  - don't specify defaults (index:analyzed for strings, boost: 1.0)\n  - omit extra settings that have little or negative effects\n    (term_vector:with_positions_offsets)\n  - only use type-specific settings (not_analyzed makes no sense for\n    non-string types)\n\n  Fixes #866\n- Add narrow queries as individual subfilter to promote caching. [Honza\n  Král]\n\n  Each narrow query will be cached individually which means more cache\n  reuse\n- Doc formatting fix. [Honza Král]\n- Allow users to pass in additional kwargs to Solr and Elasticsearch\n  backends. [Honza Král]\n\n  Fixes #674, #862\n- Whoosh: allow multiple order_by() fields. [Chris Adams]\n\n  The Whoosh backend previously prevented the use of more than one\n  order_by field. It now allows multiple fields as long as every field\n  uses the same sort direction.\n\n  Thanks to @qris, @overflow for the patch\n\n  Closes #627\n  Closes #919\n- Fix bounding box calculation for spatial queries (closes #718) [Chris\n  Adams]\n\n  Thanks @jasisz for the fix\n- Docs: fix ReST syntax error in searchqueryset_api.rst. [Chris Adams]\n- Tests: update test_more_like_this for Solr 4.6. [Chris Adams]\n- Tests: update test_quotes_regression exception test. [Chris Adams]\n\n  This was previously relying on the assumption that a query would not\n  match, which is Solr version dependent, rather than simply\n  confirming that no exception is raised\n- Tests: update Solr schema to match current build_solr_schema. [Chris\n  Adams]\n\n  * Added fields used in spatial tests: location, username, comment\n  * Updated schema for recent Solr\n  * Ran `xmllint --c14n \"$*\" | xmllint --format --encode \"utf-8\" -`\n- Tests: update requirements to match tox. [Chris Adams]\n- Move test Solr instructions into a script. [Chris Adams]\n\n  These will just rot horribly if they're not actually executed on a\n  regular basis…\n- Merge pull request #907 from gam-phon/patch-1. [Chris Adams]\n- Fix url for solr 3.5.0. 
[Yaser Alraddadi]\n- Merge pull request #775 from stefanw/avoid-pks-seen-on-update. [Justin\n  Caratzas]\n\n  Avoid unnecessary, potentially huge db query on index update\n- Merge branch 'master' into avoid-pks-seen-on-update. [Stefan\n  Wehrmeyer]\n\n  Change smart_text into smart_bytes as in master\n\n  Conflicts:\n  \thaystack/management/commands/update_index.py\n- Upgraded python3 in tox to 3.3. [justin caratzas]\n\n  3.3 is a better target for haystack than 3.2, due to PEP414\n- Merge pull request #885 from HonzaKral/elasticsearch-py. [Justin\n  Caratzas]\n\n  Use elasticsearch-py instead of pyelasticsearch.\n- Use elasticsearch-py instead of pyelasticsearch. [Honza Král]\n\n  elasticsearch-py is the official Python client for Elasticsearch.\n- Merge pull request #899 from acdha/html5-input-type=search. [Justin\n  Caratzas]\n\n  Search form <input type=\"search\">\n- Use HTML5 <input type=search> (closes #899) [Chris Adams]\n- Update travis config so that unit tests will run with latest solr +\n  elasticsearch. [justin caratzas]\n- Merge remote-tracking branch 'HonzaKral/filtered_queries' Fixes #886.\n  [Daniel Lindsley]\n- Use terms filter for DJANGO_CT, *much* faster. [Honza Král]\n- Cleaner query composition when it comes to filters in ES. [Honza Král]\n- Fixed typo in AUTHORS. [justin caratzas]\n- Added pabluk to AUTHORS. [Pablo SEMINARIO]\n- Fixed ValueError exception when SILENTLY_FAIL=True. [Pablo SEMINARIO]\n- Merge pull request #882 from benspaulding/docs/issue-607. [Justin\n  Caratzas]\n\n  Remove bit about SearchQuerySet.load_all_queryset deprecation\n- Remove bit about SearchQuerySet.load_all_queryset deprecation. [Ben\n  Spaulding]\n\n  That method was entirely removed in commit b8048dc0e9e3.\n\n  Closes #607. Thanks to @bradleyayers for the report.\n- Merge pull request #881 from benspaulding/docs/issue-606. 
[Justin\n  Caratzas]\n\n  Fix documentation regarding ModelSearchIndex to match current behavior\n- Fix documentation regarding ModelSearchIndex to match current\n  behavior. [Ben Spaulding]\n\n  Closes #606. Thanks to @bradleyayers for the report.\n- Fixed #575 & #838, where a change in Whoosh 2.5> required explicitly\n  setting the Searcher.search() limit to None to restore correct\n  results. [Keryn Knight]\n\n  Thanks to scenable and Shige Abe (typeshige) for\n  the initial reports, and to scenable for finding\n  the root issue in Whoosh.\n- Removed python 1.4 / python 3.2 tox env because that's not possible.\n  [justin caratzas]\n\n  also pinned versions of requirements for testing\n- Added test for autocomplete whitespace fix. [justin caratzas]\n- Fixed autocomplete() method: spaces in query. [Ivan Virabyan]\n- Fixed basepython for tox envs, thanks --showconfig. [justin caratzas]\n\n  also, added latest django 1.4 release, which doesn't error out\n  currently.\n\n  Downgraded python3.3 to python3.2, as that's what the latest debian\n  stable includes.  I'm working on compiling pypy and python3.3 on the\n  test box, so those will probably be re-added as time allows.\n\n  failing tests: still solr context extraction + spatial\n- Fixed simple backend for django 1.6, _fields was removed. [justin\n  caratzas]\n- [tox] run tests for 1.6, fix test modules so they are found by the new\n  test runner. [justin caratzas]\n\n  These changes are backwards-compatible with django 1.5.  As of this\n  commit, the only failing tests are the Solr extraction test, and the\n  spatial tests.\n- Switch solr configs to solr 4. [justin caratzas]\n\n  almost all tests passing, but spatial not working\n- Update solr schema template to fix stopwords_en.txt relocation.\n  [Patrick Altman]\n\n  Seems that in versions >3.6 and >4 stopwords_en.txt moved\n  to a new location. 
This won't be backwards compatible for\n  older versions of solr.\n\n  Addresses issues #558, #560\n  In addition, issue #671 references this problem\n- Pass `using` to index_queryset for update. [bigjust]\n- Update tox to test pypy, py26, py27, py33, django1.5 and django1.6.\n  [bigjust]\n\n  django 1.6 doesn't actually work yet, but there are other efforts to get that working\n- Fixed my own spelling test case. How embarrassing. [Dan Watson]\n- Added a spelling test case for ElasticSearch. [Dan Watson]\n- More ElasticSearch test fixes. [Dan Watson]\n- Added some faceting tests for ElasticSearch. [Dan Watson]\n- Fixed ordering issues in the ElasticSearch tests. [Dan Watson]\n- Merge remote-tracking branch 'infoxchange/fix-elasticsearch-index-\n  settings-reset' [Daniel Lindsley]\n- Test ensuring recreating the index does not remove the mapping.\n  [Alexey Kotlyarov]\n- Reset backend state when deleting index. [Alexey Kotlyarov]\n\n  Reset setup_complete and existing_mapping when an index is\n  deleted. This ensures create_index is called later to restore\n  the settings properly.\n- Use Django's copy of six. [Dan Watson]\n- Merge pull request #847 from luisbarrueco/mgmtcmd-fix. [Dan Watson]\n\n  Fixed an update_index bug when using multiple connections\n- Fixed an update_index bug when using multiple connections. [Luis\n  Barrueco]\n- Fixed a missed raw_input call on Python 3. [Dan Watson]\n- Merge pull request #840 from postatum/fix_issue_807. [Justin Caratzas]\n\n  Fixed issue #807\n- Fixed issue #807. [postatum]\n- Merge pull request #837 from nicholasserra/signals-docs-fix. [Justin\n  Caratzas]\n\n  Tiny docs fix in signal_processors example code\n- Tiny docs fix in signal_processors example code. [Nicholas Serra]\n- Merge pull request #413 from phill-tornroth/patch-1. [Justin Caratzas]\n\n  Silly little change, I know.. but I actually ran into a case where I acci\n- Silly little change, I know.. 
but I actually ran into a case where I\n  accidentally passed a list of models in without *ing them. When that\n  happens, we get a string formatting exception (not all arguments were\n  formatted) instead of the useful \"that ain't a model, kid\" business.\n  [Phill Tornroth]\n- Merge pull request #407 from bmihelac/patch-1. [Justin Caratzas]\n\n  Fixed doc, ``query`` is context variable and not in request.\n- Fixed doc, ``query`` is context variable and not in request.\n  [bmihelac]\n- Merge pull request #795 from\n  davesque/update_excluded_indexes_error_message. [Justin Caratzas]\n\n  Improve error message for duplicate index classes\n- Improve error message for duplicate index classes. [David Sanders]\n\n  To my knowledge, the 'HAYSTACK_EXCLUDED_INDEXES' setting is no longer\n  used.\n- Started the v2.1.1 work. [Daniel Lindsley]\n- Avoid unnecessary db query on index update. [Stefan Wehrmeyer]\n\n  pks_seen is only needed if objects are removed from\n  index, so only compute it if necessary.\n  Improve pks_seen to not build an intermediary list.\n\n\nv2.1.0 (2013-07-28)\n-------------------\n- Bumped to v2.1.0! [Daniel Lindsley]\n- Python 3 support is done, thanks to RevSys & the PSF! Updated\n  requirements in the docs. [Daniel Lindsley]\n- Added all the new additions to AUTHORS. [Daniel Lindsley]\n- Merge branch 'py3' [Daniel Lindsley]\n- Added Python 3 compatibility notes. [Daniel Lindsley]\n- Whoosh mostly working under Python 3. See docs for details. [Daniel\n  Lindsley]\n- Backported things removed from Django 1.6. [Daniel Lindsley]\n- Final core changes. [Daniel Lindsley]\n- Solr tests all but passing under Py3. [Daniel Lindsley]\n- Elasticsearch tests passing under Python 3. [Daniel Lindsley]\n\n  Requires git master (ES 1.0.0 beta) to work properly when using suggestions.\n- Overrides passing under Py3. [Daniel Lindsley]\n- Simple backend ported & passing. [Daniel Lindsley]\n- Whoosh all but fully working under Python 3. 
[Daniel Lindsley]\n- Closer on porting ES. [Daniel Lindsley]\n- Core tests mostly pass on Py 3. \\o/ [Daniel Lindsley]\n\n  What's left are 3 failures, all ordering issues, where the correct output is present, but ordering is different between Py2 / Py3.\n- More porting to Py3. [Daniel Lindsley]\n- Started porting to py3. [Daniel Lindsley]\n- Merge pull request #821 from knightzero/patch-1. [Justin Caratzas]\n\n  Update autocomplete.rst\n- Update autocomplete.rst. [knightzero]\n- Merge pull request #744 from trigger-corp/master. [Justin Caratzas]\n\n  Allow for document boosting with elasticsearch\n- Update the current elasticsearch boost test to also test document\n  boosting. [Connor Dunn]\n- Map boost field to _boost in elasticsearch. [Connor Dunn]\n\n  Means that including a boost field in a document will cause document level boosting.\n- Added ethurgood to AUTHORS. [Daniel Lindsley]\n- Add test__to_python for elastisearch backend. [Eric Thurgood]\n- Fix datetime instantiation in elasticsearch backend's _to_python.\n  [Eric Thurgood]\n- Merge pull request #810 from pabluk/minor-docs-fix. [Chris Adams]\n\n  Updated description for TIMEOUT setting - thanks @pabluk\n- Updated description for TIMEOUT setting. [Pablo SEMINARIO]\n- Updated the backend support docs. Thanks to kezabelle & dimiro1 for\n  the report! [Daniel Lindsley]\n- Added haystack-rqueue to \"Other Apps\". [Daniel Lindsley]\n- Updated README & index. [Daniel Lindsley]\n- Added installation instructions. [bigjust]\n- Merge pull request #556 from h3/master. [Justin Caratzas]\n\n  Updated to 'xapian_backend.XapianEngine' docs & example\n- Updated XapianEngine module path. [h3]\n- Updated XapianEngine module path. [h3]\n- Merge pull request #660 from seldon/master. [Justin Caratzas]\n\n  Some minor docs fixes\n- Fixed a few typos in docs. [Lorenzo Franceschini]\n- Add Educreations to who uses Haystack. [bigjust]\n- Merge pull request #692 from stephenpaulger/master. 
[Justin Caratzas]\n\n  Change the README link to latest 1.2 release.\n- Update README.rst. [Stephen Paulger]\n\n  Update 1.2.6 link to 1.2.7\n- Merge pull request #714 from miracle2k/patch-1. [Justin Caratzas]\n\n  Note enabling INCLUDE_SPELLING requires a reindex.\n- Note enabling INCLUDE_SPELLING requires a reindex. [Michael Elsdörfer]\n- Unicode support in SimpleSearchQuery (closes #793) [slollo]\n- Merge pull request #790 from andrewschoen/feature/haystack-identifier-\n  module. [Andrew Schoen]\n\n  Added a new setting, HAYSTACK_IDENTIFIER_METHOD, which will allow a cust...\n- Added a new setting, ``HAYSTACK_IDENTIFIER_METHOD``, which will allow\n  a custom method to be provided for ``haystack.utils.get_identifier``.\n  [Schoen]\n- Fixed an exception log message in elasticsearch backend, and added a\n  loading test for elasticsearch. [Dan Watson]\n- Changed exception log message in whoosh backend to use\n  __class__.__name__ instead of just __name__ (closes #641) [Jeffrey\n  Tratner]\n- Further bumped the docs on installing engines. [Daniel Lindsley]\n- Update docs/installing_search_engines.rst. [Tom Dyson]\n\n  grammar, Elasticsearch version and formatting consistency fixes.\n- Added GroundCity & Docket Alarm to the Who Uses docs. [Daniel\n  Lindsley]\n- Started the development on v2.0.1. [Daniel Lindsley]\n\n\nv2.0.0 (2013-05-12)\n-------------------\n- Bumped to v2.0.0! [Daniel Lindsley]\n- Changed how ``Raw`` inputs are handled. Thanks to kylemacfarlane for\n  the (really good) report. [Daniel Lindsley]\n- Added a (passing) test trying to verify #545. [Daniel Lindsley]\n- Fixed a doc example on custom forms. Thanks to GrivIN and benspaulding\n  for patches. [Daniel Lindsley]\n- Added a reserved character for Solr (v4+ supports regexes). Thanks to\n  RealBigB for the initial patch. [Daniel Lindsley]\n- Merge branch 'master' of github.com:toastdriven/django-haystack.\n  [Jannis Leidel]\n- Fixed the stats tests. 
[Daniel Lindsley]\n- Adding description of stats support to docs. [Ranjit Chacko]\n- Adding support for stats queries in Solr. [Ranjit Chacko]\n- Added tests for the previous kwargs patch. [Daniel Lindsley]\n- Bug fix to allow object removal without a commit. [Madan Thangavelu]\n- Do not refresh the index after it has been deleted. [Kevin Tran]\n- Fixed naming of manager for consistency. [Jannis Leidel]\n\n  - renamed `HaystackManager` to `SearchIndexManager`\n  - renamed `get_query_set` to `get_search_queryset`\n- Updated the docs on running tests. [Daniel Lindsley]\n- Merge branch 'madan' [Daniel Lindsley]\n- Fixed the case where index_name isn't available. [Daniel Lindsley]\n- Fixing typo to allow manager to switch between different index_labels.\n  [Madan Thangavelu]\n- Haystack manager and tests. [Madan Thangavelu]\n- Removing unwanted spaces. [Madan Thangavelu]\n- Object query manager for searchindex. [Madan Thangavelu]\n- Added requirements file for testing. [Daniel Lindsley]\n- Added a unit test for #786. [Dan Watson]\n- Fixed a bug when passing \"using\" to SearchQuerySet (closes #786).\n  [Rohan Gupta]\n- Ignore the env directory. [Daniel Lindsley]\n- Allow for setuptools as well as distutils. [Daniel Lindsley]\n- Merge pull request #785 from mattdeboard/dev-mailing-list. [Chris\n  Adams]\n\n  Add note directing users to django-haystack-dev mailing list.\n- Add note directing users to django-haystack-dev mailing list. [Matt\n  DeBoard]\n- Spelling suggestions for ElasticSearch (closes #769 and #747) [Dan\n  Watson]\n- Added support for sending facet options to the backend (closes #753)\n  [Dan Watson]\n- More_like_this: honor .models() restriction. [Chris Adams]\n\n  Original patch by @mattdeboard updated to remove test drift since it was\n  originally submitted\n\n  Closes #593\n  Closes #543\n- Removed commercial support info. [Daniel Lindsley]\n- Merge pull request #779 from pombredanne/pep386_docfixes. 
[Jannis\n  Leidel]\n\n  Update version to 2.0.0b0 in doc conf\n- Update version to 2.0.0b0 in doc conf .. to redeem myself of the\n  unlucky #777 minimess. [pombredanne]\n- Merge pull request #778 from falinsky/patch-1. [Justin Caratzas]\n\n  Fix bug in setup.py\n- Fix bug. [Sergey Falinsky]\n- Merge pull request #777 from pombredanne/patch-1. [Justin Caratzas]\n\n  Update version to be a PEP386 strict with a minor qualifier of 0 for now...\n- Update version to be a PEP386 strict with a minor qualifier of 0 for\n  now. [pombredanne]\n\n  This version becomes a \"strict\" version under PEP386 and should be recognized by install/packaging tools (such as distribute/distutils/setuptools) as newer than 2.0.0-beta. This will also help making small increments of the version which brings some sanity when using an update from HEAD and ensure that things will upgrade alright.\n- Update_index: display Unicode model names (closes #767) [Chris Adams]\n\n  The model's verbose_name_plural value is included as Unicode but under\n  Python 2.x the progress message it was included in was a regular\n  byte-string. Now it's correctly handled as Unicode throughout.\n- Merge pull request #731 from adityar7/master. [Jannis Leidel]\n\n  Setup custom routers before settings up signal processor.\n- Setup custom routers before settings up signal processor. [Aditya\n  Rajgarhia]\n\n  Fixes https://github.com/toastdriven/django-haystack/issues/727\n- Port the `from_python` method from pyelasticsearch to the\n  Elasticsearch backend, similar to `to_python` in\n  181bbc2c010a135b536e4d1f7a1c5ae4c63e33db. [Jannis Leidel]\n\n  Fixes #762. Refs #759.\n- Merge pull request #761 from stefanw/simple-models-filter. [Justin\n  Caratzas]\n\n  Make models filter work on simple backend\n- Make model filter for simple backend work. 
[Stefan Wehrmeyer]\n\n  Adds Stefan Wehrmeyer to AUTHORS for patch\n- Merge pull request #746 from lazerscience/fix-update-index-output.\n  [Justin Caratzas]\n\n  Using force_text for indexing message\n- Replacing `force_text` with `force_unicode`. #746. [Bernhard Vallant]\n- Using force_text for indexing message. [Bernhard Vallant]\n\n  verbose_name_plural may be a functional proxy object from ugettext_lazy,\n  it should be forced to be a string!\n- Support pyelasticsearch 0.4 change (closes #759) [Chris Adams]\n\n  pyelasticsearch 0.4 removed the `to_python` method Haystack used.\n\n  Thanks to @erikrose for the quick patch\n- Merge pull request #755 from toastdriven/issue/754-doc-build-warning.\n  [Chris Adams]\n- Add preceding dots to hyperlink target; fixes issue 754. [Ben\n  Spaulding]\n\n  This error was introduced in commit faacbcb.\n- Merge pull request #752 from bigjust/master. [Justin Caratzas]\n\n  Fix Simple Score field collision\n- Simple: Fix bug in score field collision. [bigjust]\n\n  Previous commit 0a9c919 broke the simple backend for models that\n  didn't have an indexed score field.  Added a test to cover regression.\n- Set zip_safe in setup.py to prevent egg creation. [Jannis Leidel]\n\n  This is a work around for a bug in Django that prevents detection of management commands embedded in packages installed as setuptools eggs.\n- Merge pull request #740 from acdha/simplify-search-view-name-property.\n  [Chris Adams]\n\n  Remove redundant __name__ assignment on SearchView\n- Remove redundant __name__ assignment on SearchView. [Chris Adams]\n\n  __name__ was being explicitly set to a value which was the same as the\n  default value.\n\n  Additionally corrected the obsolete __name__ method declaration in the\n  documentation which reflected the code prior to SHA:89d8096 in 2010.\n- Merge pull request #698 from gjb83/master. 
[Chris Adams]\n\n  Fixed deprecation warning for url imports on Django 1.3\n\n  Thanks to @gjb83 for the patch.\n- Removed star imports. [gjb83]\n- Maintain Django 1.3 compatibility. [gjb83]\n- Fixed deprecation warning. [gjb83]\n\n  django.conf.urls.defaults is now deprecated. Use django.conf.urls instead.\n- Merge pull request #743 from bigjust/solr-managementcmd-fix. [Justin\n  Caratzas]\n\n  Solr build_solr_schema: fixed a bug in build_solr_schema. Thanks to mjum...\n- Solr build_solr_schema: fixed a bug in build_solr_schema. Thanks to\n  mjumbewu for the report! [Justin Caratzas]\n\n  If you tried to run build_solr_schema with a backend that supports\n  schema building, but was not Solr (like Whoosh), then you would get an\n  invalid schema.  This fix raises the ImproperlyConfigured exception\n  with a proper message.\n- Merge pull request #742 from bigjust/simple-backend-score-fix. [Justin\n  Caratzas]\n- Simple: removed conflicting score field from raw result objects.\n  [Justin Caratzas]\n\n  This keeps consistency with the Solr backend, which resolves this conflict\n  in the same manner.\n- ElasticSearch: fix AltParser test. [Chris Adams]\n\n  AltParser queries are still broken but that functionality has only been\n  listed as supported on Solr.\n- Better Solr AltParser quoting (closes #730) [Chris Adams]\n\n  Previously the Solr AltParser implementation embedded the search term as an\n  attribute inside the {!…} construct, which required it to be doubly escaped.\n\n  This change contributed by @ivirabyan moves the value outside the query,\n  requiring only our normal quoting:\n\n      q=(_query_:\"{!edismax}Assassin's Creed\")\n\n  instead of:\n\n      q=(_query_:\"{!edismax v='Assassin's Creed'}\")\n\n  Thanks @ivirabyan for the patch!\n- Solr: use nested query syntax for AltParser queries. 
[Chris Adams]\n\n  The previous implementation would, given a query like this::\n\n      sqs.filter(content=AltParser('dismax', 'library', qf=\"title^2 text\" mm=1))\n\n  generate a query like this::\n\n      {!dismax v=library qf=\"title^2 text\" mm=1}\n\n  This works in certain situations but causes Solr to choke while parsing it\n  when Haystack wraps this term in parentheses::\n\n      org.apache.lucene.queryParser.ParseException: Cannot parse '({!dismax mm=1 qf='title^2 text institution^0.8' v=library})':\n      Encountered \" &lt;RANGEEX_GOOP&gt; \"qf=\\'title^1.25 \"\" at line 1, column 16.\n\n  The solution is to use the nested query syntax described here:\n\n      http://searchhub.org/2009/03/31/nested-queries-in-solr/\n\n  This will produce a query like this, which works with Solr 3.6.2::\n\n      (_query_:\"{!edismax mm=1 qf='title^1.5 text institution^0.5' v=library}\")\n\n  Leaving the actual URL query string looking like this::\n\n      q=%28_query_%3A%22%7B%21edismax+mm%3D1+qf%3D%27title%5E1.5+text+institution%5E0.5%27+v%3Dlibrary%7D%22%29\n\n  * Tests updated for the new query generation output\n  * A Solr backend task was added to actually run the dismax queries and verify\n    that we're not getting Solr 400s errors due to syntax gremlins\n- Pass active backend to index queryset calls (closes #534) [Chris\n  Adams]\n\n  Now the Index index_queryset() and read_queryset() methods will be called with\n  the active backend name so they can optionally perform backend-specific\n  filtering.\n\n  This is extremely useful when using something like Solr cores to maintain\n  language specific backends, allowing an Index to select the appropriate\n  documents for each language::\n\n      def index_queryset(self, using=None):\n          return Post.objects.filter(language=using)\n\n  Changes:\n      * clear_index, update_index and rebuild_index all default to processing\n        *every* backend. 
``--using`` may now be provided multiple times to select\n        a subset of the configured backends.\n      * Added examples to the Multiple Index documentation page\n- Because Windows. [Daniel Lindsley]\n- Fixed the docs on debugging to cover v2. Thanks to eltesttox for the\n  report. [Daniel Lindsley]\n- That second colon matters. [Daniel Lindsley]\n- Further docs on autocomplete. [Daniel Lindsley]\n- Fixed the imports that would stomp on each other. [Daniel Lindsley]\n\n  Thanks to codeinthehole, Attorney-Fee & imacleod for pointing this out.\n- BACKWARD-INCOMPATIBLE: Removed ``RealTimeSearchIndex`` in favor of\n  ``SignalProcessors``. [Daniel Lindsley]\n\n  This only affects people who were using ``RealTimeSearchIndex`` (or a\n  queuing variant) to perform near real-time updates. Those users should\n  refer to the Migration documentation.\n- Updated ignores. [Daniel Lindsley]\n- Merge pull request #552 from hadesgames/master. [Jannis Leidel]\n\n  Fixes process leak when using update_index with workers.\n- Fixed update_index process leak. [Tache Alexandru]\n- Merge branch 'master' of github.com:toastdriven/django-haystack.\n  [Jannis Leidel]\n- Merge pull request #682 from acdha/682-update_index-tz-support. [Chris\n  Adams]\n\n  update_index should use non-naive datetime when settings.USE_TZ=True\n- Tests for update_index timezone support. [Chris Adams]\n\n  * Confirm that update_index --age uses the Django timezone-aware now\n    support function\n  * Skip this test on Django 1.3\n- Update_index: use tz-aware datetime where applicable. [Chris Adams]\n\n  This will allow Django 1.4 users with USE_TZ=True to use update_index with time\n  windowing as expected - otherwise the timezone offset needs to be manually\n  included in the value passed to -a\n- Tests: mark expected failures in Whoosh suite. 
[Chris Adams]\n\n  This avoids making it painful to run the test suite and flags the tests which\n  need attention\n- Tests: mark expected failures in ElasticSearch suite. [Chris Adams]\n\n  This avoids making it painful to run the test suite and flags the tests which\n  need attention\n- Multiple index tests: correct handling of Whoosh teardown. [Chris\n  Adams]\n\n  We can't remove the Whoosh directory per-test - only after every\n  test has run…\n- Whoosh tests: use a unique tempdir. [Chris Adams]\n\n  This ensures that there's no way for results to persist across runs\n  and lets the OS clean up the mess if we fail catastrophically\n\n  The multiindex and regular whoosh tests will have different prefixes to ease\n  debugging\n- Merge pull request #699 from acdha/tox-multiple-django-versions.\n  [Chris Adams]\n\n  Minor tox.ini & test runner tidying\n- Test runner: set exit codes on failure. [Chris Adams]\n- Tox: refactor envlist to include Django versions. [Chris Adams]\n\n  * Expanded base dependencies\n  * Set TEST_RUNNER_ARGS=-v0 to reduce console noise\n  * Add permutations of python 2.5, 2.6, 2.7 and django 1.3 and 1.4\n- Test runner: add $TEST_RUNNER_ARGS env. variable. [Chris Adams]\n\n  This allows you to export TEST_RUNNER_ARGS=-v0 to affect all 9\n  invocations\n- Tox: store downloads in tmpdir. [Chris Adams]\n- Be a bit more careful when resetting connections in the\n  multiprocessing updater. Fixes #562. [Jannis Leidel]\n- Fixed distance handling in result parser of the elasticsearch backend.\n  This is basically the second part of #566. Thanks to Josh Drake for\n  the initial patch. [Jannis Leidel]\n- Merge pull request #670 from dhan88/master. [Jannis Leidel]\n\n  Elasticsearch backend using incorrect coordinates for geo_bounding_box (within) filter\n- Elasticsearch geo_bounding_box filter expects top_left (northwest) and\n  bottom_right (southeast). Haystack's elasticsearch backend is passing\n  northeast and southwest coordinates instead. 
[Danny Han]\n- Merge pull request #666 from caioariede/master. [Jannis Leidel]\n\n  Fixes incorrect call to put_mapping on ElasticSearch backend\n- Fixes incorrect call to put_mapping on elasticsearch backend. [Caio\n  Ariede]\n- Added ericholscher to AUTHORS. [Daniel Lindsley]\n- Add a title for the support matrix so it's linkable. [Eric Holscher]\n- Tests: command-line help and coverage.py support. [Chris Adams]\n\n  This makes run_all_tests.sh a little easier to use and simplifies the process of\n  running under coverage.py\n\n  Closes #683\n- Tests: basic help and coverage.py support. [Chris Adams]\n\n  run_all_tests.sh now supports --help and --with-coverage\n- Add a CONTRIBUTING.md file for Github. [Chris Adams]\n\n  This is a migrated copy of docs/contributing.rst so Github can suggest it when\n  pull requests are being created\n- Fix combination logic for complex queries. [Chris Adams]\n\n  Previously combining querysets which used a mix of logical AND and OR operations\n  behaved unexpectedly.\n\n  Thanks to @mjl for the patch and tests in SHA: 9192dbd\n\n  Closes #613, #617\n- Added rz to AUTHORS. [Daniel Lindsley]\n- Fixed string joining bug in the simple backend. [Rodrigo Guzman]\n- Added failing test case for #438. [Daniel Lindsley]\n- Fix Solr more-like-this tests (closes #655) [Chris Adams]\n\n  * Refactored the MLT tests to be less brittle in checking only\n    the top 5 results without respect to slight ordering\n    variations.\n  * Refactored LiveSolrMoreLikeThisTestCase into multiple tests\n  * Convert MLT templatetag tests to rely on mocks for stability\n    and to avoid hard-coding backend assumptions, at the expense\n    of relying completely on the backend MLT queryset-level tests\n    to exercise that code.\n  * Updated MLT code to always assume deferred querysets are\n    available (introduced in Django 1.1) and removed a hard-coded\n    internal attr check\n- All backends: fixed more_like_this & deferreds. 
[Chris Adams]\n\n  Django removed the get_proxied_model helper function in the 1.3 dev\n  cycle:\n\n  https://code.djangoproject.com/ticket/17678\n\n  This change adds support for the simple new property access used by 1.3+\n\n  BACKWARD INCOMPATIBLE: Django 1.2 is no longer supported\n- Updated elasticsearch backend to use a newer pyelasticsearch release\n  that features an improved API , connection pooling and better\n  exception handling. [Jannis Leidel]\n- Added Gidsy to list of who uses Haystack. [Jannis Leidel]\n- Increased the number of terms facets returned by the Elasticsearch\n  backend to 100 from the default 10 to work around an issue upstream.\n  [Jannis Leidel]\n\n  This is hopefully only temporary until it's fixed in Elasticsearch, see https://github.com/elasticsearch/elasticsearch/issues/1776.\n- Merge pull request #643 from stephenmcd/master. [Chris Adams]\n\n  Fixed logging in simple_backend\n- Fixed logging in simple_backend. [Stephen McDonald]\n- Added Pitchup to Who Uses. [Daniel Lindsley]\n- Merge branch 'unittest2-fix' [Chris Adams]\n- Better unittest2 detection. [Chris Adams]\n\n  This supports Python 2.6 and earlier by shifting the import to look\n  towards the future name rather than the past\n- Merge pull request #652 from acdha/solr-content-extraction-test-fix.\n  [Chris Adams]\n\n  Fix the Solr content extraction handler tests\n- Add a minimal .travis.yml file to suppress build spam. [Chris Adams]\n\n  Until the travis-config branch is merged in, this can be spread around to avoid\n  wasting time running builds before we're ready\n- Tests: enable Solr content extraction handler. [Chris Adams]\n\n  This is needed for the test_content_extraction test to pass\n- Tests: Solr: fail immediately on config errors. [Chris Adams]\n- Solr tests: clean unused imports. [Chris Adams]\n- Suppress console DeprecationWarnings. [Chris Adams]\n- Merge pull request #651 from acdha/unittest2-fix. 
[Chris Adams]\n\n  Update unittest2 import logic so the tests can actually be run\n- Update unittest2 import logic. [Chris Adams]\n\n  We'll try to get it from Django 1.3+ but Django 1.2 users will need to install\n  it manually\n- Merge pull request #650 from bigjust/patch-1. [Chris Adams]\n\n  Fix typo in docstring\n- Fix typo. [Justin Caratzas]\n- Refactor to use a dummy logger that lets you turn off logging. [Travis\n  Swicegood]\n- A bunch of Solr testing cleanup. [Chris Adams]\n- Skip test if pysolr isn't available. [Travis Swicegood]\n- Updated Who Uses to correct a backend usage. [Daniel Lindsley]\n- Updated documentation about using the main pyelasticsearch release.\n  [Jannis Leidel]\n- Merge pull request #628 from kjoconnor/patch-1. [Jannis Leidel]\n\n  Missing `\n- Missing ` [Kevin O'Connor]\n- Fixed a mostly-empty warning in the ``SearchQuerySet`` docs. Thanks to\n  originell for the report! [Daniel Lindsley]\n- Fixed the \"Who Uses\" entry on AstroBin. [Daniel Lindsley]\n- Use the match_all query to speed up performing filter only queries\n  dramatically. [Jannis Leidel]\n- Fixed typo in docs. Closes #612. [Jannis Leidel]\n- Updated link to celery-haystack repository. [Jannis Leidel]\n- Fixed the docstring of SearchQuerySet.none. Closes #435. [Jannis\n  Leidel]\n- Fixed the way quoting is done in the Whoosh backend when using the\n  ``__in`` filter. [Jason Kraus]\n- Added the solrconfig.xml I use for testing. [Daniel Lindsley]\n- Fixed typo in input types docs. Closes #551. [Jannis Leidel]\n- Make sure a search engine's backend isn't instantiated on every call\n  to the backend but only once. Fixes #580. [Jannis Leidel]\n- Restored sorting to ES backend that was broken in\n  d1fa95529553ef8d053308159ae4efc455e0183f. [Jannis Leidel]\n- Prevent spatial filters from stomping on existing filters in\n  ElasticSearch backend. 
[Josh Drake]\n- Merge branch 'mattdeboard-sq-run-refactor' [Jannis Leidel]\n- Fixed an ES test that seems like a change in behavior in recent ES\n  versions. [Jannis Leidel]\n- Merge branch 'sq-run-refactor' of https://github.com/mattdeboard\n  /django-haystack into mattdeboard-sq-run-refactor. [Jannis Leidel]\n- Refactor Solr & ES SearchQuery subclasses to use the ``build_params``\n  from ``BaseSearchQuery`` to build the kwargs to be passed to the\n  search engine. [Matt DeBoard]\n\n  This refactor is made to make extending Haystack simpler. I only ran the Solr tests which invoked a ``run`` call (via ``get_results``), and those passed. I did not run the ElasticSearch tests; however, the ``run`` method for both Lucene-based search engines were identical before, and are identical now. The test I did run -- ``LiveSolrSearchQueryTestCase.test_log_query`` -- passed.\n- Merge branch 'master' of https://github.com/toastdriven/django-\n  haystack. [Jannis Leidel]\n- Merge pull request #568 from duncm/master. [Jannis Leidel]\n\n  Fix exception in SearchIndex.get_model()\n- Fixed ``SearchIndex.get_model()`` to raise exception instead of\n  returning it. [Duncan Maitland]\n- Merge branch 'master' of https://github.com/toastdriven/django-\n  haystack. [Jannis Leidel]\n- Fixed Django 1.4 compatibility. Thanks to bloodchild for the report!\n  [Daniel Lindsley]\n- Refactored ``SearchBackend.search`` so that kwarg-generation\n  operations are in a discrete method. [Matt DeBoard]\n\n  This makes it much simpler to subclass ``SearchBackend`` (& the engine-specific variants) to add support for new parameters.\n- Added witten to AUTHORS. [Daniel Lindsley]\n- Fix for #378: Highlighter returns unexpected results if one term is\n  found within another. [dan]\n- Removed jezdez's old entry in AUTHORS. [Daniel Lindsley]\n- Added Jannis to Primary Authors. [Daniel Lindsley]\n- Merge branch 'master' of github.com:jezdez/django-haystack. 
[Jannis\n  Leidel]\n- Fixed a race condition when using the simple backend (e.g. in tests)\n  and changing the DEBUG setting dynamically (e.g. in integration\n  tests). [Jannis Leidel]\n- Add missing `ImproperlyConfigured` import from django's exceptions.\n  [Luis Nell]\n\n  l178 failed.\n- Commercial support is now officially available for Haystack. [Daniel\n  Lindsley]\n- Using multiple workers (and resetting the connection) causes things to\n  break when the app is finished and it moves to the next and does\n  qs.count() to get a count of the objects in that app to index with\n  psycopg2 reporting a closed connection. Manually closing the\n  connection before each iteration if using multiple workers before\n  building the queryset fixes this issue. [Adam Fast]\n- Removed code leftover from v1.X. Thanks to kossovics for the report!\n  [Daniel Lindsley]\n- Fixed a race condition when using the simple backend (e.g. in tests)\n  and changing the DEBUG setting dynamically (e.g. in integration\n  tests). [Jannis Leidel]\n- All backends let individual documents fail, rather than failing whole\n  chunks. Forward port of acdha's work on 1.2.X. [Daniel Lindsley]\n- Added ikks to AUTHORS. [Daniel Lindsley]\n- Fixed ``model_choices`` to use ``smart_unicode``. [Igor Támara]\n- +localwiki.org. [Philip Neustrom]\n- Added Pix Populi to \"Who Uses\". [Daniel Lindsley]\n- Added contribution guidelines. [Daniel Lindsley]\n- Updated the docs to reflect the supported version of Django. Thanks to\n  catalanojuan for the original patch! [Daniel Lindsley]\n- Fix PYTHONPATH Export and add Elasticsearch example. [Craig Nagy]\n- Updated the Whoosh URL. Thanks to cbess for the original patch!\n  [Daniel Lindsley]\n- Reset database connections on each process on update_index when using\n  --workers. [Diego Búrigo Zacarão]\n- Moved the ``build_queryset`` method to ``SearchIndex``. [Alex Vidal]\n\n  This method is used to build the queryset for indexing operations. 
It is copied\n  from the build_queryset function that lived in the update_index management\n  command.\n\n  Making this change allows developers to modify the queryset used for indexing\n  even when a date filter is necessary. See `tests/core/indexes.py` for tests.\n- Fixed a bug where ``Indexable`` could be mistakenly recognized as a\n  discoverable class. Thanks to twoolie for the original patch! [Daniel\n  Lindsley]\n- Fixed a bug with query construction. Thanks to dstufft for the report!\n  [Daniel Lindsley]\n\n  This goes back to erroring on the side of too many parens, where there weren't enough before. The engines will no-op them when they're not important.\n- Fixed a bug where South would cause Haystack to setup too soon. Thanks\n  to adamfast for the report! [Daniel Lindsley]\n- Added Crate.io to \"Who Uses\"! [Daniel Lindsley]\n- Fixed a small typo in spatial docs. [Frank Wiles]\n- Logging: avoid forcing string interpolation. [Chris Adams]\n- Fixed docs on using a template for Solr schema. [Daniel Lindsley]\n- Add note to 'Installing Search Engines' doc explaining how to override\n  the template used by 'build_solr_schema' [Matt DeBoard]\n- Better handling of ``.models``. Thanks to zbyte64 for the report &\n  HonzaKral for the original patch! [Daniel Lindsley]\n- Added Honza to AUTHORS. [Daniel Lindsley]\n- Handle sorting for ElasticSearch better. [Honza Kral]\n- Update docs/backend_support.rst. [Issac Kelly]\n- Fixed a bug where it's possible to erroneously try to get spelling\n  suggestions. Thanks to bigjust for the report! [Daniel Lindsley]\n- The ``dateutil`` requirement is now optional. Thanks to arthurnn for\n  the report. [Daniel Lindsley]\n- Fixed docs on Solr spelling suggestion until the new Suggester support\n  can be added. Thanks to zw0rk & many others for the report! [Daniel\n  Lindsley]\n- Bumped to beta. [Daniel Lindsley]\n\n  We're not there yet, but we're getting close.\n- Added saved-search to subproject docs. 
[Daniel Lindsley]\n- Search index discovery no longer swallows errors with reckless\n  abandon. Thanks to denplis for the report! [Daniel Lindsley]\n- Elasticsearch backend officially supported. [Daniel Lindsley]\n\n  All tests passing.\n- Back down to 3 on latest pyelasticsearch. [Daniel Lindsley]\n- And then there were 3 (Elasticsearch test failures). [Daniel Lindsley]\n- Solr tests now run faster. [Daniel Lindsley]\n- Improved the tutorial docs. Thanks to denplis for the report! [Daniel\n  Lindsley]\n- Down to 9 failures on Elasticsearch. [Daniel Lindsley]\n- Because the wishlist has changed. [Daniel Lindsley]\n- A few small fixes. Thanks to robhudson for the report! [Daniel\n  Lindsley]\n- Added an experimental Elasticsearch backend. [Daniel Lindsley]\n\n  Tests are not yet passing but it works in basic hand-testing. Passing test coverage coming soon.\n- Fixed a bug related to the use of ``Exact``. [Daniel Lindsley]\n- Removed accidental indent. [Daniel Lindsley]\n- Ensure that importing fields without the GeoDjango kit doesn't cause\n  an error. Thanks to dimamoroz for the report! [Daniel Lindsley]\n- Added the ability to reload a connection. [Daniel Lindsley]\n- Fixed ``rebuild_index`` to properly have all options available.\n  [Daniel Lindsley]\n- Fixed a bug in pagination. Thanks to sgoll for the report! [Daniel\n  Lindsley]\n- Added an example to the docs on what to put in ``INSTALLED_APPS``.\n  Thanks to Dan Krol for the suggestion. [Daniel Lindsley]\n- Changed imports so the geospatial modules are only imported as needed.\n  [Dan Loewenherz]\n- Better excluded index detection. [Daniel Lindsley]\n- Fixed a couple of small typos. [Sean Bleier]\n- Made sure the toolbar templates are included in the source\n  distribution. [Jannis Leidel]\n- Fixed a few documentation issues. [Jannis Leidel]\n- Moved my contribution for the geospatial backend to a attribution of\n  Gidsy which funded my work. [Jannis Leidel]\n- Small docs fix. 
[Daniel Lindsley]\n- Added input types, which enables advanced querying support. Thanks to\n  CMGdigital for funding the development! [Daniel Lindsley]\n- Added geospatial search support! [Daniel Lindsley]\n\n  I have anxiously waited to add this feature for almost 3 years now.\n  Support is finally present in more than one backend & I was\n  generously given some paid time to work on implementing this.\n\n  Thanks go out to:\n\n    * CMGdigital, who paid for ~50% of the development of this feature\n      & were awesomely supportive.\n    * Jannis Leidel (jezdez), who did the original version of this\n      patch & was an excellent sounding board.\n    * Adam Fast, for patiently holding my hand through some of the\n      geospatial confusions & for helping me verify GeoDjango\n      functionality.\n    * Justin Bronn, for the great work he originally did on\n      GeoDjango, which served as a point of reference/inspiration\n      on the API.\n\n  And thanks to all others who have submitted a variety of\n  patches/pull requests/interest throughout the years trying to get\n  this feature in place.\n- Added .values() / .values_list() methods, for fetching less data.\n  Thanks to acdha for the original implementation! [Daniel Lindsley]\n- Reduced the number of queries Haystack has to perform in many cases\n  (pagination/facet_counts/spelling_suggestions). Thanks to acdha for\n  the improvements! [Daniel Lindsley]\n- Spruced up the layout on the new DjDT panel. [Daniel Lindsley]\n- Fixed compatibility with Django pre-1.4 trunk. * The\n  MAX_SHOW_ALL_ALLOWED variable is no longer available, and hence causes\n  an ImportError with Django versions higher 1.3. * The\n  \"list_max_show_all\" attribute on the ChangeList object is used\n  instead. * This patch maintains compatibility with Django 1.3 and\n  lower by trying to import the MAX_SHOW_ALL_ALLOWED variable first.\n  [Aram Dulyan]\n- Updated ``setup.py`` for the new panel bits. 
[Daniel Lindsley]\n- Added a basic DjDT panel for Haystack. Thanks to robhudson for\n  planting the seed that Haystack should bundle this! [Daniel Lindsley]\n- Added the ability to specify apps or individual models to\n  ``update_index``. Thanks to CMGdigital for funding this development!\n  [Daniel Lindsley]\n- Added ``--start/--end`` flags to ``update_index`` to allow finer-\n  grained control over date ranges. Thanks to CMGdigital for funding\n  this development! [Daniel Lindsley]\n- I hate Python packaging. [Daniel Lindsley]\n- Made ``SearchIndex`` classes thread-safe. Thanks to craigds for the\n  report & original patch. [Daniel Lindsley]\n- Added a couple more uses. [Daniel Lindsley]\n- Bumped reqs in docs for content extraction bits. [Daniel Lindsley]\n- Added a long description for PyPI. [Daniel Lindsley]\n- Solr backend support for rich-content extraction. [Chris Adams]\n\n  This allows indexes to use text extracted from binary files as well\n  as normal database content.\n- Fixed errant ``self.log``. [Daniel Lindsley]\n\n  Thanks to terryh for the report!\n- Fixed a bug with index inheritance. [Daniel Lindsley]\n\n  Fields would seem to not obey the MRO while method did. Thanks to ironfroggy for the report!\n- Fixed a long-time bug where the Whoosh backend didn't have a ``log``\n  attribute. [Daniel Lindsley]\n- Fixed a bug with Whoosh's edge n-gram support to be consistent with\n  the implementation in the other engines. [Daniel Lindsley]\n- Added celery-haystack to Other Apps. [Daniel Lindsley]\n- Changed ``auto_query`` so it can be run on other, non-``content``\n  fields. [Daniel Lindsley]\n- Removed extra loops through the field list for a slight performance\n  gain. [Daniel Lindsley]\n- Moved ``EXCLUDED_INDEXES`` to a per-backend setting. [Daniel Lindsley]\n- BACKWARD-INCOMPATIBLE: The default filter is now ``__contains`` (in\n  place of ``__exact``). 
[Daniel Lindsley]\n\n  If you were relying on this behavior before, simply add ``__exact`` to the fieldname.\n- BACKWARD-INCOMPATIBLE: All \"concrete\" ``SearchIndex`` classes must now\n  mixin ``indexes.Indexable`` as well in order to be included in the\n  index. [Daniel Lindsley]\n- Added tox to the mix. [Daniel Lindsley]\n- Allow for less configuration. Thanks to jeromer & cyberdelia for the\n  reports! [Daniel Lindsley]\n- Fixed up the management commands to show the right alias & use the\n  default better. Thanks to jeromer for the report! [Daniel Lindsley]\n- Fixed a bug where signals wouldn't get setup properly, especially on\n  ``RealTimeSearchIndex``. Thanks to byoungb for the report! [Daniel\n  Lindsley]\n- Fixed formatting in the tutorial. [Daniel Lindsley]\n- Removed outdated warning about padding numeric fields. Thanks to\n  mchaput for pointing this out! [Daniel Lindsley]\n- Added a silent failure option to prevent Haystack from suppressing\n  some failures. [Daniel Lindsley]\n\n  This option defaults to ``True`` for compatibility & to prevent cases where lost connections can break reindexes/searches.\n- Fixed the simple backend to not throw an exception when handed an\n  ``SQ``. Thanks to diegobz for the report! [Daniel Lindsley]\n- Whoosh now supports More Like This! Requires Whoosh 1.8.4. [Daniel\n  Lindsley]\n- Deprecated ``get_queryset`` & fixed how indexing happens. Thanks to\n  Craig de Stigter & others for the report! [Daniel Lindsley]\n- Fixed a bug where ``RealTimeSearchIndex`` was erroneously included in\n  index discovery. Thanks to dedsm for the report & original patch!\n  [Daniel Lindsley]\n- Added Vickery to \"Who Uses\". [Daniel Lindsley]\n- Require Whoosh 1.8.3+. It's for your own good. [Daniel Lindsley]\n- Added multiprocessing support to ``update_index``! Thanks to\n  CMGdigital for funding development of this feature. [Daniel Lindsley]\n- Fixed a bug where ``set`` couldn't be used with ``__in``. Thanks to\n  Kronuz for the report! 
[Daniel Lindsley]\n- Added a ``DecimalField``. [Daniel Lindsley]\n- Fixed a bug where a different style of import could confuse the\n  collection of indexes. Thanks to groovecoder for the report. [Daniel\n  Lindsley]\n- Fixed a typo in the autocomplete docs. Thanks to anderso for the\n  catch! [Daniel Lindsley]\n- Fixed a backward-incompatible query syntax change Whoosh introduced\n  between 1.6.1 & 1.6.2 that causes only one model to appear as though\n  it is indexed. [Daniel Lindsley]\n- Updated AUTHORS to reflect the Kent's involvement in multiple index\n  support. [Daniel Lindsley]\n- BACKWARD-INCOMPATIBLE: Added multiple index support to Haystack, which\n  enables you to talk to more than one search engine in the same\n  codebase. Thanks to: [Daniel Lindsley]\n\n  * Kent Gormat for funding the development of this feature.\n  * alex, freakboy3742 & all the others who contributed to Django's multidb feature, on which much of this was based.\n  * acdha for inspiration & feedback.\n  * dcramer for inspiration & feedback.\n  * mcroydon for patch review & docs feedback.\n\n  This commit starts the development efforts for Haystack v2.\n\n\nv1.2.7 (2012-04-06)\n-------------------\n- Bumped to v1.2.7! [Daniel Lindsley]\n- Solr: more informative logging when full_prepare fails during update.\n  [Chris Adams]\n\n  * Change the exception handler to record per-object failures\n  * Log the precise object which failed in a manner which tools like Sentry can examine\n- Added ikks to AUTHORS. [Daniel Lindsley]\n- Fixed ``model_choices`` to use ``smart_unicode``. Thanks to ikks for\n  the patch! [Daniel Lindsley]\n- Fixed compatibility with Django pre-1.4 trunk. * The\n  MAX_SHOW_ALL_ALLOWED variable is no longer available, and hence causes\n  an ImportError with Django versions higher 1.3. * The\n  \"list_max_show_all\" attribute on the ChangeList object is used\n  instead. 
* This patch maintains compatibility with Django 1.3 and\n  lower by trying to import the MAX_SHOW_ALL_ALLOWED variable first.\n  [Aram Dulyan]\n- Fixed a bug in pagination. Thanks to sgoll for the report! [Daniel\n  Lindsley]\n- Added an example to the docs on what to put in ``INSTALLED_APPS``.\n  Thanks to Dan Krol for the suggestion. [Daniel Lindsley]\n- Added .values() / .values_list() methods, for fetching less data.\n  [Chris Adams]\n- Reduced the number of queries Haystack has to perform in many cases\n  (pagination/facet_counts/spelling_suggestions). [Chris Adams]\n- Fixed compatibility with Django pre-1.4 trunk. * The\n  MAX_SHOW_ALL_ALLOWED variable is no longer available, and hence causes\n  an ImportError with Django versions higher 1.3. * The\n  \"list_max_show_all\" attribute on the ChangeList object is used\n  instead. * This patch maintains compatibility with Django 1.3 and\n  lower by trying to import the MAX_SHOW_ALL_ALLOWED variable first.\n  [Aram Dulyan]\n\n\nv1.2.6 (2011-12-09)\n-------------------\n- I hate Python packaging. [Daniel Lindsley]\n- Bumped to v1.2.6! [Daniel Lindsley]\n- Made ``SearchIndex`` classes thread-safe. Thanks to craigds for the\n  report & original patch. [Daniel Lindsley]\n- Added a long description for PyPI. [Daniel Lindsley]\n- Fixed errant ``self.log``. [Daniel Lindsley]\n\n  Thanks to terryh for the report!\n- Started 1.2.6. [Daniel Lindsley]\n\n\nv1.2.5 (2011-09-14)\n-------------------\n- Bumped to v1.2.5! [Daniel Lindsley]\n- Fixed a bug with index inheritance. [Daniel Lindsley]\n\n  Fields would seem to not obey the MRO while method did. Thanks to ironfroggy for the report!\n- Fixed a long-time bug where the Whoosh backend didn't have a ``log``\n  attribute. [Daniel Lindsley]\n- Fixed a bug with Whoosh's edge n-gram support to be consistent with\n  the implementation in the other engines. [Daniel Lindsley]\n- Added tswicegood to AUTHORS. 
[Daniel Lindsley]\n- Fixed the ``clear_index`` management command to respect the ``--site``\n  option. [Travis Swicegood]\n- Removed outdated warning about padding numeric fields. Thanks to\n  mchaput for pointing this out! [Daniel Lindsley]\n- Added a silent failure option to prevent Haystack from suppressing\n  some failures. [Daniel Lindsley]\n\n  This option defaults to ``True`` for compatibility & to prevent cases where lost connections can break reindexes/searches.\n- Fixed the simple backend to not throw an exception when handed an\n  ``SQ``. Thanks to diegobz for the report! [Daniel Lindsley]\n- Bumped version post-release. [Daniel Lindsley]\n- Whoosh now supports More Like This! Requires Whoosh 1.8.4. [Daniel\n  Lindsley]\n\n\nv1.2.4 (2011-05-28)\n-------------------\n- Bumped to v1.2.4! [Daniel Lindsley]\n- Fixed a bug where the old ``get_queryset`` wouldn't be used during\n  ``update_index``. Thanks to Craig de Stigter & others for the report.\n  [Daniel Lindsley]\n- Bumped to v1.2.3! [Daniel Lindsley]\n- Require Whoosh 1.8.3+. It's for your own good. [Daniel Lindsley]\n\n\nv1.2.2 (2011-05-19)\n-------------------\n- Bumped to v1.2.2! [Daniel Lindsley]\n- Added multiprocessing support to ``update_index``! Thanks to\n  CMGdigital for funding development of this feature. [Daniel Lindsley]\n- Fixed a bug where ``set`` couldn't be used with ``__in``. Thanks to\n  Kronuz for the report! [Daniel Lindsley]\n- Added a ``DecimalField``. [Daniel Lindsley]\n\n\nv1.2.1 (2011-05-14)\n-------------------\n- Bumped to v1.2.1. [Daniel Lindsley]\n- Fixed a typo in the autocomplete docs. Thanks to anderso for the\n  catch! [Daniel Lindsley]\n- Fixed a backward-incompatible query syntax change Whoosh introduced\n  between 1.6.1 & 1.6.2 that causes only one model to appear as though\n  it is indexed. [Daniel Lindsley]\n\n\nv1.2.0 (2011-05-03)\n-------------------\n- V1.2.0! [Daniel Lindsley]\n- Added ``request`` to the ``FacetedSearchView`` context. 
Thanks to\n  dannercustommade for the report! [Daniel Lindsley]\n- Fixed the docs on enabling spelling suggestion support in Solr.\n  [Daniel Lindsley]\n- Fixed a bug so that ``ValuesListQuerySet`` now works with the ``__in``\n  filter. Thanks to jcdyer for the report! [Daniel Lindsley]\n- Added the new ``SearchIndex.read_queryset`` bits. [Sam Cooke]\n- Changed ``update_index`` so that it warns you if your\n  ``SearchIndex.get_queryset`` returns an unusable object. [Daniel\n  Lindsley]\n- Removed Python 2.3 compat code & bumped requirements for the impending\n  release. [Daniel Lindsley]\n- Added treyhunner to AUTHORS. [Daniel Lindsley]\n- Improved the way selected_facets are handled. [Chris Adams]\n\n  * ``selected_facets`` may be provided multiple times.\n  * Facet values are quoted to avoid backend confusion (i.e. `author:Joe Blow` is seen by Solr as `author:Joe AND Blow` rather than the expected `author:\"Joe Blow\"`)\n- Add test for Whoosh field boost. [Trey Hunner]\n- Enable field boosting with Whoosh backend. [Trey Hunner]\n- Fixed the Solr & Whoosh backends to use the correct ``site`` when\n  processing results. Thanks to Madan Thangavelu for the original patch!\n  [Daniel Lindsley]\n- Added lukeman to AUTHORS. [Daniel Lindsley]\n- Updating Solr download and installation instructions to reference\n  version 1.4.1 as 1.3.x is no longer available. Fixes #341. [lukeman]\n- Revert \"Shifted ``handle_registrations`` into ``models.py``.\" [Daniel\n  Lindsley]\n\n  This seems to be breaking for people, despite working here & passing tests. Back to the drawing board...\n\n  This reverts commit 106758f88a9bc5ab7e505be62d385d876fbc52fe.\n- Shifted ``handle_registrations`` into ``models.py``. [Daniel Lindsley]\n\n  For historical reasons, it was (wrongly) kept & run in ``__init__.py``. This should help fix many people's issues with it running too soon.\n- Pulled out ``EmptyResults`` for testing elsewhere. 
[Daniel Lindsley]\n- Fixed a bug where boolean filtering wouldn't work properly on Whoosh.\n  Thanks to alexrobbins for pointing it out! [Daniel Lindsley]\n- Added link to 1.1 version of the docs. [Daniel Lindsley]\n- Whoosh 1.8.1 compatibility. [Daniel Lindsley]\n- Added TodasLasRecetas to \"Who Uses\". Thanks Javier! [Daniel Lindsley]\n- Added a new method to ``SearchQuerySet`` to allow you to specify a\n  custom ``result_class`` to use in place of ``SearchResult``. Thanks to\n  aaronvanderlip for getting me thinking about this! [Daniel Lindsley]\n- Added better autocomplete support to Haystack. [Daniel Lindsley]\n- Changed ``SearchForm`` to be more permissive of missing form data,\n  especially when the form is unbound. Thanks to cleifer for pointing\n  this out! [Daniel Lindsley]\n- Ensured that the primary key of the result is a string. Thanks to\n  gremmie for pointing this out! [Daniel Lindsley]\n- Fixed a typo in the tutorial. Thanks to JavierLopezMunoz for pointing\n  this out! [Daniel Lindsley]\n- Added appropriate warnings about ``HAYSTACK_<ENGINE>_PATH`` settings\n  in the docs. [Daniel Lindsley]\n- Added some checks for badly-behaved backends. [Daniel Lindsley]\n- Ensure ``use_template`` can't be used with ``MultiValueField``.\n  [Daniel Lindsley]\n- Added n-gram fields for auto-complete style searching. [Daniel\n  Lindsley]\n- Added ``django-celery-haystack`` to the subapp docs. [Daniel Lindsley]\n- Fixed the faceting docs to correctly link to narrowed facets.\n  Thanks to daveumr for pointing that out! [Daniel Lindsley]\n- Updated docs to reflect the ``form_kwargs`` that can be used for\n  customization. [Daniel Lindsley]\n- Whoosh backend now explicitly closes searchers in an attempt to use\n  fewer file handles. [Daniel Lindsley]\n- Changed fields so that ``boost`` is now the parameter of choice over\n  ``weight`` (though ``weight`` has been retained for backward\n  compatibility). Thanks to many people for the report! 
[Daniel\n  Lindsley]\n- Bumped revision. [Daniel Lindsley]\n\n\nv1.1 (2010-11-23)\n-----------------\n- Bumped version to v1.1! [Daniel Lindsley]\n- The ``build_solr_schema`` command can now write directly to a file.\n  Also includes tests for the new overrides. [Daniel Lindsley]\n- Haystack's reserved field names are now configurable. [Daniel\n  Lindsley]\n- BACKWARD-INCOMPATIBLE: ``auto_query`` has changed so that only double\n  quotes cause exact match searches. Thanks to craigds for the report!\n  [Daniel Lindsley]\n- Added docs on handling content-type specific output in results.\n  [Daniel Lindsley]\n- Added tests for ``content_type``. [Daniel Lindsley]\n- Added docs on boosting. [Daniel Lindsley]\n- Updated the ``searchfield_api`` docs. [Daniel Lindsley]\n- ``template_name`` can be a list of templates passed to\n  ``loader.select_template``. Thanks to zifot for the suggestion.\n  [Daniel Lindsley]\n- Moved handle_facet_parameters call into FacetField's __init__. [Travis\n  Cline]\n- Updated the pysolr dependency docs & added a debugging note about\n  boost support. [Daniel Lindsley]\n- Starting the beta. [Daniel Lindsley]\n- Fixed a bug with ``FacetedSearchForm`` where ``cleaned_data`` may not\n  exist. Thanks to imageinary for the report! [Daniel Lindsley]\n- Added the ability to build epub versions of the docs. [Alfredo]\n- Clarified that the current supported version of Whoosh is the 1.1.1+\n  series. Thanks to glesica for the report & original patch! [Daniel\n  Lindsley]\n- The SearchAdmin now correctly uses SEARCH_VAR instead of assuming\n  things. [Rob Hudson]\n- Added the ability to \"weight\" individual fields to adjust their\n  relevance. [David Sauve]\n- Fixed facet fieldname lookups to use the proper fieldname. [Daniel\n  Lindsley]\n- Removed unneeded imports from the Solr backend. [Daniel Lindsley]\n- Further revamping of faceting. 
Each field type now has a faceted\n  variant that's created either with ``faceted=True`` or manual\n  initialization. [Daniel Lindsley]\n\n  This should also make user-created field types possible, as many of the gross ``isinstance`` checks were removed.\n- Fixes SearchQuerySet not pickleable. Patch by oyiptong, tests by\n  toastdriven. [oyiptong]\n- Added the ability to remove objects from the index that are no longer\n  in the database to the ``update_index`` management command. [Daniel\n  Lindsley]\n- Added a ``range`` filter type. Thanks to davisp & lukesneeringer for\n  the suggestion! [Daniel Lindsley]\n\n  Note that integer ranges are broken on the current Whoosh (1.1.1). However, date & character ranges seem to work fine.\n- Consistency. [Daniel Lindsley]\n- Ensured that multiple calls to ``count`` don't result in multiple\n  queries. Thanks to Nagyman and others for the report! [Daniel\n  Lindsley]\n- Ensure that when fetching the length of a result set that the whole\n  index isn't consumed (especially on Whoosh & Xapian). [Daniel\n  Lindsley]\n- Really fixed dict ordering bugs in SearchSite. [Travis Cline]\n- Changed how you query for facets and how they are presented in the\n  facet counts.  Allows customization of facet field names in indexes.\n  [Travis Cline]\n\n  Lightly backward-incompatible (git only).\n- Made it easier to override ``SearchView/SearchForm`` behavior when no\n  query is present. [Daniel Lindsley]\n\n  No longer do you need to override both ``SearchForm`` & ``SearchView`` if you want to return all results. Use the built-in ``SearchView``, provide your own custom ``SearchForm`` subclass & override the ``no_query_found`` method per the docstring.\n- Don't assume that any pk castable to an integer should be an integer.\n  [Carl Meyer]\n- Fetching a list of all fields now produces correct results regardless\n  of dict-ordering. Thanks to carljm & veselosky for the report! 
[Daniel\n  Lindsley]\n- Added notes about what is needed to make schema-building independent\n  of dict-ordering. [Daniel Lindsley]\n- Sorted model order matters. [Daniel Lindsley]\n- Prevent Whoosh from erroring if the ``end_offset`` is less than or\n  equal to 0. Thanks to zifot for the report! [Daniel Lindsley]\n- Removed insecure use of ``eval`` from the Whoosh backend. Thanks to\n  SmileyChris for pointing this out. [Daniel Lindsley]\n- Disallow ``indexed=False`` on ``FacetFields``. Thanks to jefftriplett\n  for the report! [Daniel Lindsley]\n- Added ``FacetField`` & changed the way facets are processed. [Daniel\n  Lindsley]\n\n  Facet data is no longer quietly duplicated just before it goes into the index. Instead, full fields are created (with all the standard data & methods) to contain the faceted information.\n\n  This change is backward-compatible, but allows for better extension, not requiring data duplication into an unfaceted field and a little less magic.\n- EmptyQuerySet.facet_counts() won't hit the backend. [Chris Adams]\n\n  This avoids an unnecessary extra backend query displaying the default\n  faceted search form.\n- TextMate fail. [Daniel Lindsley]\n- Changed ``__name__`` to an attribute on ``SearchView`` to work with\n  decorators. Thanks to trybik for the report! [Daniel Lindsley]\n- Changed some wording on the tutorial to indicate where the data\n  template should go. Thanks for the suggestion Davepar! [Daniel\n  Lindsley]\n- Merge branch 'whoosh-1.1' [Daniel Lindsley]\n- Final cleanup before merging Whoosh 1.1 branch! [Daniel Lindsley]\n- Final Whoosh 1.1.1 fixes. Waiting for an official release of Whoosh &\n  hand testing, then this ought to be merge-able. [Daniel Lindsley]\n- Upgraded the Whoosh backend to 1.1. Still one remaining test failure\n  and two errors. Waiting on mchaput's thoughts/patches. [Daniel\n  Lindsley]\n- Mistakenly committed this change. This bug is not fixed. 
[Daniel\n  Lindsley]\n- Better handling of attempts at loading backends when the various\n  supporting libraries aren't installed. Thanks to traviscline for the\n  report. [Daniel Lindsley]\n- Fixed random test failures from not running the Solr tests in awhile.\n  [Daniel Lindsley]\n- Changed mlt test to use a set comparison to eliminate failures due to\n  ordering differences. [Travis Cline]\n- Sped up Solr backend tests by moving away from RealTimeSearchIndex\n  since it was adding objects to Solr when loading fixtures. [Travis\n  Cline]\n- Automatically add ``suggestion`` to the context if\n  ``HAYSTACK_INCLUDE_SPELLING`` is set. Thanks to notanumber for the\n  suggestion! [Daniel Lindsley]\n- Added apollo13 to AUTHORS for the ``SearchForm.__init__`` cleanup.\n  [Daniel Lindsley]\n- Use kwargs.pop instead of try/except. [Florian Apolloner]\n- Added Rob to AUTHORS for the admin cleanup. [Daniel Lindsley]\n- Fixed selection_note text by adding missing zero. [Rob Hudson]\n- Fixed full_result_count in admin search results. [Rob Hudson]\n- Fixed admin actions in admin search results. [Rob Hudson]\n- Added DevCheatSheet to \"Who Uses\". [Daniel Lindsley]\n- Added Christchurch Art Gallery to \"Who Uses\". [Daniel Lindsley]\n- Forgot to include ghostrocket as submitting a patch on the previous\n  commit. [Daniel Lindsley]\n- Fixed a serious bug in the ``simple`` backend that would flip the\n  object instance and class. [Daniel Lindsley]\n- Updated Whoosh to 0.3.18. [Daniel Lindsley]\n- Updated NASA's use of Haystack in \"Who Uses\". [Daniel Lindsley]\n- Changed how ``ModelSearchIndex`` introspects to accurately use\n  ``IntegerField`` instead of ``FloatField`` as it was using. [Daniel\n  Lindsley]\n- Added CongresoVisible to Who Uses. [Daniel Lindsley]\n- Added a test to verify a previous change to the ``simple`` backend.\n  [Daniel Lindsley]\n- Fixed the new admin bits to not explode on Django 1.1. 
[Daniel\n  Lindsley]\n- Added ``SearchModelAdmin``, which enables Haystack-based search within\n  the admin. [Daniel Lindsley]\n- Fixed a bug when not specifying a ``limit`` when using the\n  ``more_like_this`` template tag. Thanks to symroe for the original\n  patch. [Daniel Lindsley]\n- Fixed the error messages that occur when looking up attributes on a\n  model. Thanks to acdha for the patch. [Daniel Lindsley]\n- Added pagination to the example search template in the docs so it's\n  clear that it is supported. [Daniel Lindsley]\n- Fixed copy-paste foul in ``Installing Search Engines`` docs. [Daniel\n  Lindsley]\n- Fixed the ``simple`` backend to return ``SearchResult`` instances, not\n  just bare model instances. Thanks to Agos for the report. [Daniel\n  Lindsley]\n- Fixed the ``clear_index`` management command to respect\n  ``--verbosity``. Thanks to kylemacfarlane for the report. [Daniel\n  Lindsley]\n- Altered the ``simple`` backend to only search textual fields. This\n  makes the backend work consistently across all databases and is likely\n  the desired behavior anyhow. Thanks to kylemacfarlane for the report.\n  [Daniel Lindsley]\n- Fixed a bug in the ``Highlighter`` which would double-highlight HTML\n  tags. Thanks to EmilStenstrom for the original patch. [Daniel\n  Lindsley]\n- Updated management command docs to mention all options that are\n  accepted. [Daniel Lindsley]\n- Altered the Whoosh backend to correctly clear the index when using the\n  ``RAMStorage`` backend. Thanks to kylemacfarlane for the initial\n  patch. [Daniel Lindsley]\n- Changed ``SearchView`` to allow more control over how many results are\n  shown per page. Thanks to simonw for the suggestion. [Daniel Lindsley]\n- Ignore ``.pyo`` files when listing out the backend options. Thanks to\n  kylemacfarlane for the report. [Daniel Lindsley]\n- Added CustomMade to Who Uses. [Daniel Lindsley]\n- Moved a backend import to allow changing the backend Haystack uses on\n  the fly. 
[Daniel Lindsley]\n\n  Useful for testing.\n- Added more debugging information to the docs. [Daniel Lindsley]\n- Added DeliverGood.org to the \"Who Uses\" docs. [Daniel Lindsley]\n- Added a settings override on ``HAYSTACK_LIMIT_TO_REGISTERED_MODELS``\n  as a possible performance optimization. [Daniel Lindsley]\n- Added the ability to pickle ``SearchResult`` objects. Thanks to dedsm\n  for the original patch. [Daniel Lindsley]\n- Added docs and fixed tests on the backend loading portions. Thanks to\n  kylemacfarlane for the report. [Daniel Lindsley]\n- Fixed bug with ``build_solr_schema`` where ``stored=False`` would be\n  ignored. Thanks to johnthedebs for the report. [Daniel Lindsley]\n- Added debugging notes for Solr. Thanks to smccully for reporting this.\n  [Daniel Lindsley]\n- Fixed several errors in the ``simple`` backend. Thanks to notanumber\n  for the original patch. [Daniel Lindsley]\n- Documentation fixes for Xapian. Thanks to notanumber for the edits!\n  [Daniel Lindsley]\n- Fixed a typo in the tutorial. Thanks to cmbeelby for pointing this\n  out. [Daniel Lindsley]\n- Fixed an error in the tutorial. Thanks to bencc for pointing this out.\n  [Daniel Lindsley]\n- Added a warning to the docs that ``SearchQuerySet.raw_search`` does\n  not chain. Thanks to jacobstr for the report. [Daniel Lindsley]\n- Fixed an error in the documentation on providing fields for faceting.\n  Thanks to ghostmob for the report. [Daniel Lindsley]\n- Fixed a bug where a field that's both nullable & faceted would error\n  if no data was provided. Thanks to LarryEitel for the report. [Daniel\n  Lindsley]\n- Fixed a regression where the built-in Haystack fields would no longer\n  facet correctly. Thanks to traviscline for the report. [Daniel\n  Lindsley]\n- Fixed last code snippet on the ``SearchIndex.prepare_FOO`` docs.\n  Thanks to sk1p for pointing that out. 
[Daniel Lindsley]\n- Fixed a bug where the schema could be built improperly if similar\n  fieldnames had different options. [Daniel Lindsley]\n- Added to existing tests to ensure that multiple faceted fields are\n  included in the index. [Daniel Lindsley]\n- Finally added a README. [Daniel Lindsley]\n- Added a note about versions of the docs. [Daniel Lindsley]\n- Go back to the default Sphinx theme. The custom Haystack theme is too\n  much work and too little benefit. [Daniel Lindsley]\n- Added a note in the tutorial about building the schema when using\n  Solr. Thanks to trey0 for the report! [Daniel Lindsley]\n- Fixed a bug where using ``SearchQuerySet.models()`` on an unregistered\n  model would be silently ignored. [Daniel Lindsley]\n\n  It is still silently ignored, but now emits a warning informing the user of why they may receive more results back than they expect.\n- Added notes about the ``simple`` backend in the docs. Thanks to\n  notanumber for catching the omission. [Daniel Lindsley]\n- Removed erroneous old docs about Lucene support, which never landed.\n  [Daniel Lindsley]\n- Merge branch 'master' of github.com:toastdriven/django-haystack.\n  [Daniel Lindsley]\n- Fixed typo in the tutorial. Thanks fxdgear for pointing that out!\n  [Daniel Lindsley]\n- Fixed a bug related to Unicode data in conjunction with the ``dummy``\n  backend. Thanks to kylemacfarlane for the report! [Daniel Lindsley]\n- Added Forkinit to Who Uses. [Daniel Lindsley]\n- Added Rampframe to Who Uses. [Daniel Lindsley]\n- Added other apps documentation for Haystack-related apps. [Daniel\n  Lindsley]\n- Unified the way ``DEFAULT_OPERATOR`` is setup. [Daniel Lindsley]\n- You can now override ``ITERATOR_LOAD_PER_QUERY`` with a setting if\n  you're consuming big chunks of a ``SearchQuerySet``. Thanks to\n  kylemacfarlane for the report. [Daniel Lindsley]\n- Moved the preparation of faceting data to a\n  ``SearchIndex.full_prepare()`` method for easier overriding. 
Thanks to\n  xav for the suggestion! [Daniel Lindsley]\n- The ``more_like_this`` tag now silently fails if things go south.\n  Thanks to piquadrat for the patch! [Daniel Lindsley]\n- Added a fleshed out ``simple_backend`` for basic usage + testing.\n  [David Sauve]\n- ``SearchView.build_form()`` now accepts a dict to pass along to the\n  form. Thanks to traviscline for the patch! [Daniel Lindsley]\n- Fixed the ``setup.py`` to include ``haystack.utils`` and added to the\n  ``MANIFEST.in``. Thanks to jezdez for the patch! [Daniel Lindsley]\n- Fixed date faceting in Solr. [Daniel Lindsley]\n\n  No more OOMs and very fast over large data sets.\n- Added the ``search_view_factory`` function for thread-safe use of\n  ``SearchView``. [Daniel Lindsley]\n- Added more to the docs about the ``SearchQuerySet.narrow()`` method to\n  describe when/why to use it. [Daniel Lindsley]\n- Fixed Whoosh tests. [Daniel Lindsley]\n\n  Somewhere, a reference to the old index was hanging around causing incorrect failures.\n- The Whoosh backend now uses the ``AsyncWriter``, which ought to provide\n  better performance. Requires Whoosh 0.3.15 or greater. [Daniel\n  Lindsley]\n- Added a way to pull the correct fieldname, regardless if it's been\n  overridden or not. [Daniel Lindsley]\n- Added docs about adding new fields. [Daniel Lindsley]\n- Removed a painful ``isinstance`` check which should make non-standard\n  usages easier. [Daniel Lindsley]\n- Updated docs regarding reserved field names in Haystack. [Daniel\n  Lindsley]\n- Pushed some of the new faceting bits down in the implementation.\n  [Daniel Lindsley]\n- Removed unnecessary fields from the Solr schema template. [Daniel\n  Lindsley]\n- Revamped how faceting is done within Haystack to make it easier to\n  work with. [Daniel Lindsley]\n- Add more sites to Who Uses. [Daniel Lindsley]\n- Fixed a bug in ``ModelSearchIndex`` where the ``index_fieldname``\n  would not get set. Also added a way to override it in a general\n  fashion. 
Thanks to traviscline for the patch! [Daniel Lindsley]\n- Backend API standardization. Thanks to batiste for the report! [Daniel\n  Lindsley]\n- Removed a method that was supposed to have been removed before 1.0.\n  Oops. [Daniel Lindsley]\n- Added the ability to override field names within the index. Thanks to\n  traviscline for the suggestion and original patch! [Daniel Lindsley]\n- Corrected the AUTHORS because slai actually provided the patch. Sorry\n  about that. [Daniel Lindsley]\n- Refined the internals of ``ModelSearchIndex`` to be a little more\n  flexible. Thanks to traviscline for the patch! [Daniel Lindsley]\n- The Whoosh backend now supports ``RamStorage`` for use with testing or\n  other non-permanent indexes. [Daniel Lindsley]\n- Fixed a bug in the ``Highlighter`` involving repetition and regular\n  expressions. Thanks to alanzoppa for the original patch! [Daniel\n  Lindsley]\n- Fixed a bug in the Whoosh backend when a ``MultiValueField`` is empty.\n  Thanks to alanwj for the original patch! [Daniel Lindsley]\n- All dynamic imports now use ``importlib``. Thanks to bfirsh for the\n  original patch mentioning this. [Daniel Lindsley]\n\n  A backported version of ``importlib`` is included for compatibility with Django 1.0.\n- Altered ``EmptySearchQuerySet`` so it's usable from templates. Thanks\n  to bfirsh for the patch! [Daniel Lindsley]\n- Added tests to ensure a Whoosh regression is no longer present.\n  [Daniel Lindsley]\n- Fixed a bug in Whoosh where using just ``.models()`` would create an\n  invalid query. Thanks to ricobl for the original patch. [Daniel\n  Lindsley]\n- Forms with initial data now display it when used with SearchView.\n  Thanks to osirius for the original patch. [Daniel Lindsley]\n- App order is now consistent with INSTALLED_APPS when running\n  ``update_index``. [Daniel Lindsley]\n- Updated docs to reflect the recommended way to do imports when\n  defining ``SearchIndex`` classes. 
[Daniel Lindsley]\n\n  This is not my preferred style but reduces the import errors some people experience.\n- Fixed omission of Xapian in the settings docs. Thanks to flebel for\n  pointing this out. [Daniel Lindsley]\n- Little bits of cleanup related to testing. [Daniel Lindsley]\n- Fixed an error in the docs related to pre-rendering data. [Daniel\n  Lindsley]\n- Added Pegasus News to Who Uses. [Daniel Lindsley]\n- Corrected an import in forms for consistency. Thanks to bkonkle for\n  pointing this out. [Daniel Lindsley]\n- Fixed bug where passing a customized ``site`` would not make it down\n  through the whole stack. Thanks to Peter Bengtsson for the report and\n  original patch. [Daniel Lindsley]\n- Bumped copyright years. [Daniel Lindsley]\n- Changed Whoosh backend so most imports will raise the correct\n  exception. Thanks to shabda for the suggestion. [Daniel Lindsley]\n- Refactored Solr's tests to minimize reindexes. Runs ~50% faster.\n  [Daniel Lindsley]\n- Fixed a couple potential circular imports. [Daniel Lindsley]\n- The same field can now have multiple query facets. Thanks to bfirsh\n  for the original patch. [Daniel Lindsley]\n- Added schema for testing Solr. [Daniel Lindsley]\n- Fixed a string interpolation bug when adding an invalid data facet.\n  Thanks to simonw for the original patch. [Daniel Lindsley]\n- Fixed the default highlighter to give slightly better results,\n  especially with short strings. Thanks to RobertGawron for the original\n  patch. [Daniel Lindsley]\n- Changed the ``rebuild_index`` command so it can take all options that\n  can be passed to either ``clear_index`` or ``update_index``. Thanks to\n  brosner for suggesting this. [Daniel Lindsley]\n- Added ``--noinput`` flag to ``clear_index``. Thanks to aljosa for the\n  suggestion. [Daniel Lindsley]\n- Updated the example in the template to be a little more real-world and\n  user friendly. Thanks to j0hnsmith for pointing this out. 
[Daniel\n  Lindsley]\n- Fixed a bug with the Whoosh backend where scores weren't getting\n  populated correctly. Thanks to horribtastic for the report. [Daniel\n  Lindsley]\n- Changed ``EmptySearchQuerySet`` so it returns an empty list when\n  slicing instead of mistakenly running queries. Thanks to askfor for\n  reporting this bug. [Daniel Lindsley]\n- Switched ``SearchView`` & ``FacetedSearchView`` to use\n  ``EmptySearchQuerySet`` (instead of a regular list) when there are no\n  results. Thanks to acdha for the original patch. [Daniel Lindsley]\n- Added RedditGifts to \"Who Uses\". [Daniel Lindsley]\n- Added Winding Road to \"Who Uses\". [Daniel Lindsley]\n- Added ryszard's full name to AUTHORS. [Daniel Lindsley]\n- Added initialization bits to part of the Solr test suite. Thanks to\n  notanumber for pointing this out. [Daniel Lindsley]\n- Started the 1.1-alpha work. Apologies for not doing this sooner.\n  [Daniel Lindsley]\n- Added an advanced setting for disabling Haystack's initialization in\n  the event of a conflict with other apps. [Daniel Lindsley]\n- Altered ``SearchForm`` to use ``.is_valid()`` instead of ``.clean()``,\n  which is a more idiomatic/correct usage. Thanks to askfor for the\n  suggestion. [Daniel Lindsley]\n- Added MANIFEST to ignore list. [Daniel Lindsley]\n- Fixed Django 1.0 compatibility when using the Solr backend. [Daniel\n  Lindsley]\n- Marked Haystack as 1.0 final. [Daniel Lindsley]\n- Incorrect test result from changing the documented way the\n  ``highlight`` template tag gets called. [Daniel Lindsley]\n- Updated the example in faceting documentation to provide better\n  results and explanation on the reasoning. [Daniel Lindsley]\n- Added further documentation about\n  ``SearchIndex``/``RealTimeSearchIndex``. [Daniel Lindsley]\n- Added docs about `SearchQuerySet.highlight`. [toastdriven]\n- Added further docs on `RealTimeSearchIndex`. 
[toastdriven]\n- Added documentation on the ``RealTimeSearchIndex`` class.\n  [toastdriven]\n- Fixed the documentation for the arguments on the `highlight` tag.\n  Thanks to lucalenardi for pointing this out. [Daniel Lindsley]\n- Fixed tutorial to mention where the `NoteSearchIndex` should be\n  placed. Thanks to bkeating for pointing this out. [Daniel Lindsley]\n- Marked Haystack as 1.0.0 release candidate 1. [Daniel Lindsley]\n- Haystack now requires Whoosh 0.3.5. [Daniel Lindsley]\n- Last minute documentation cleanup. [Daniel Lindsley]\n- Added documentation about the management commands that come with\n  Haystack. [Daniel Lindsley]\n- Added docs on the template tags included with Haystack. [Daniel\n  Lindsley]\n- Added docs on highlighting. [Daniel Lindsley]\n- Removed some unneeded legacy code that was causing conflicts when\n  Haystack was used with apps that load all models (such as `django-\n  cms2`, `localemiddleware` or `django-transmeta`). [Daniel Lindsley]\n- Removed old code from the `update_index` command. [Daniel Lindsley]\n- Altered spelling suggestion test to something a little more\n  consistent. [Daniel Lindsley]\n- Added tests for slicing the end of a `RelatedSearchQuerySet`. [Daniel\n  Lindsley]\n- Fixed case where `SearchQuerySet.more_like_this` would fail when using\n  deferred Models. Thanks to Alex Gaynor for the original patch. [Daniel\n  Lindsley]\n- Added default logging bits to prevent \"No handlers found\" message.\n  [Daniel Lindsley]\n- BACKWARD-INCOMPATIBLE: Renamed `reindex` management command to\n  `update_index`, renamed `clear_search_index` management command to\n  `clear_index` and added a `rebuild_index` command to both clear &\n  reindex. 
[Daniel Lindsley]\n- BACKWARD-INCOMPATIBLE: `SearchIndex` no longer hooks up\n  `post_save/post_delete` signals for the model it's registered with.\n  [Daniel Lindsley]\n\n  If you use `SearchIndex`, you will have to manually cron up a `reindex` (soon to become `update_index`) management command to periodically refresh the data in your index.\n\n  If you were relying on the old behavior, please use `RealTimeSearchIndex` instead, which does hook up those signals.\n- Ensured that, if a `MultiValueField` is marked as `indexed=False` in\n  Whoosh, it ought not to post-process the field. [Daniel Lindsley]\n- Ensured data going into the indexes round-trips properly. Fixed\n  `DateField`/`DateTimeField` handling for all backends and\n  `MultiValueField` handling in Whoosh. [Daniel Lindsley]\n- Added a customizable `highlight` template tag plus an underlying\n  `Highlighter` implementation. [Daniel Lindsley]\n- Added more documentation about using custom `SearchIndex.prepare_FOO`\n  methods. [Daniel Lindsley]\n- With Whoosh 0.3.5+, the number of open files is greatly reduced.\n  [Daniel Lindsley]\n- Corrected example in docs about `RelatedSearchQuerySet`. Thanks to\n  askfor for pointing this out. [Daniel Lindsley]\n- Altered `SearchResult` objects to fail gracefully when the\n  model/object can't be found. Thanks to akrito for the report. [Daniel\n  Lindsley]\n- Fixed a bug where `auto_query` would fail to escape strings that\n  pulled out for exact matching. Thanks to jefftriplett for the report.\n  [Daniel Lindsley]\n- Added Brick Design to Who Uses. [Daniel Lindsley]\n- Updated backend support docs slightly. [Daniel Lindsley]\n- Added the ability to combine `SearchQuerySet`s via `&` or `|`. Thanks\n  to reesefrancis for the suggestion. [Daniel Lindsley]\n- Revised the most of the tutorial. [Daniel Lindsley]\n- Better documented how user-provided data should be sanitized. [Daniel\n  Lindsley]\n- Fleshed out the `SearchField` documentation. 
[Daniel Lindsley]\n- Fixed formatting on ``SearchField`` documentation. [Daniel Lindsley]\n- Added basic ``SearchField`` documentation. [Daniel Lindsley]\n\n  More information about the kwargs and usage will be eventually needed.\n- Bumped the `ulimit` so Whoosh tests pass consistently on Mac OS X.\n  [Daniel Lindsley]\n- Fixed the `default` kwarg in `SearchField` (and subclasses) to work\n  properly from a user's perspective. [Daniel Lindsley]\n- BACKWARD-INCOMPATIBLE: Fixed ``raw_search`` to cooperate when\n  paginating/slicing as well as many other conditions. [Daniel Lindsley]\n\n  This no longer immediately runs the query, nor pokes at any internals. It also now takes into account other details, such as sorting & faceting.\n- Fixed a bug in the Whoosh backend where slicing before doing a hit\n  count could cause strange results when paginating. Thanks to\n  kylemacfarlane for the original patch. [Daniel Lindsley]\n- The Whoosh tests now deal with the same data set as the Solr tests and\n  cover various aspects better. [Daniel Lindsley]\n- Started to pull out the real-time, signal-based updates out of the\n  main `SearchIndex` class. Backward compatible for now. [Daniel\n  Lindsley]\n- Fixed docs to include `utils` documentation. [Daniel Lindsley]\n- Updated instructions for installing `pysolr`. Thanks to sboisen for\n  pointing this out. [Daniel Lindsley]\n- Added acdha to AUTHORS for previous commit. [Daniel Lindsley]\n- Added exception handling to the Solr Backend to silently fail/log when\n  Solr is unavailable. Thanks to acdha for the original patch. [Daniel\n  Lindsley]\n- The `more_like_this` tag is now tested within the suite. Also has lots\n  of cleanup for the other Solr tests. [Daniel Lindsley]\n- On both the Solr & Whoosh backends, don't do an update if there's\n  nothing being updated. [Daniel Lindsley]\n- Moved Haystack's internal fields out of the backends and into\n  `SearchIndex.prepare`. 
[Daniel Lindsley]\n\n  This is both somewhat more DRY as well as a step toward Haystack being useful to non-Django projects.\n- Fixed a bug in the `build_schema` where fields that aren't supposed to\n  be indexed are still getting post-processed by Solr. Thanks to Jonathan\n  Slenders for the report. [Daniel Lindsley]\n- Added HUGE to Who Uses. [Daniel Lindsley]\n- Fixed bug in Whoosh where it would always generate spelling\n  suggestions off the full query even when given a different query\n  string to check against. [Daniel Lindsley]\n- Simplified the SQ object and removed a limitation on kwargs/field\n  names that could be passed in. Thanks to traviscline for the patch.\n  [Daniel Lindsley]\n- Documentation on `should_update` fixed to match the new signature.\n  Thanks to kylemacfarlane for pointing this out. [Daniel Lindsley]\n- Fixed missing words in Best Practices documentation. Thanks to\n  frankwiles for the original patch. [Daniel Lindsley]\n- The `update_object` method now passes along kwargs as needed to the\n  `should_update` method. Thanks to askfor for the suggestion. [Daniel\n  Lindsley]\n- Updated docs about the removal of the Whoosh fork. [Daniel Lindsley]\n- Removed extraneous `BadSearchIndex3` from test suite. Thanks\n  notanumber! [Daniel Lindsley]\n- We actually want `repr`, not `str`. [Daniel Lindsley]\n- Pushed the `model_attr` check lower down into the `SearchField`s and\n  make it occur later, so that exceptions come at a point where Django\n  can better deal with them. [Daniel Lindsley]\n- Fixed attempting to access an invalid `model_attr`. Thanks to\n  notanumber for the original patch. [Daniel Lindsley]\n- Added SQ objects (replacing the QueryFilter object) as the means to\n  generate queries/query fragments. Thanks to traviscline for all the\n  hard work. [Daniel Lindsley]\n\n  The SQ object is similar to Django's Q object and allows for arbitrarily complex queries. 
Only backward incompatible if you were relying on the SearchQuery/QueryFilter APIs.\n- Reformatted debugging docs a bit. [Daniel Lindsley]\n- Added debugging information about the Whoosh lock error. [Daniel\n  Lindsley]\n- Brought the TODO up to date. [Daniel Lindsley]\n- Added a warning to the documentation about how `__startswith` may not\n  always provide the expected results. Thanks to codysoyland for\n  pointing this out. [Daniel Lindsley]\n- Added debugging documentation, with more examples coming in the\n  future. [Daniel Lindsley]\n- Added a new `basic_search` view as a both a working example of how to\n  write traditional views and as a thread-safe view, which the class-\n  based ones may/may not be. [Daniel Lindsley]\n- Fixed sample template in the documentation. Thanks to lemonad for\n  pointing this out. [Daniel Lindsley]\n- Updated documentation to include a couple more Sphinx directives.\n  Index is now more useful. [Daniel Lindsley]\n- Made links more obvious in documentation. [Daniel Lindsley]\n- Added an `example_project` demonstrating how a sample project might be\n  setup. [Daniel Lindsley]\n- Fixed `load_backend` to use the argument passed instead of always the\n  `settings.HAYSTACK_SEARCH_ENGINE`. Thanks to newgene for the report.\n  [Daniel Lindsley]\n- Regression where sometimes `narrow_queries` got juggled into a list\n  when it should be a set everywhere. Thanks tcline & ericholscher for\n  the report. [Daniel Lindsley]\n- Updated the Whoosh backend's version requirement to reflect the fully\n  working version of Whoosh. [Daniel Lindsley]\n- With the latest SVN version of Whoosh (r344), `SearchQuerySet()` now\n  works properly in Whoosh. [Daniel Lindsley]\n- Added a `FacetedModelSearchForm`. Thanks to mcroydon for the original\n  patch. [Daniel Lindsley]\n- Added translation capabilities to the `SearchForm` variants. Thanks to\n  hejsan for pointing this out. [Daniel Lindsley]\n- Added AllForLocal to Who Uses. 
[Daniel Lindsley]\n- The underlying caching has been fixed so it no longer has to fill the\n  entire cache before it to ensure consistency. [Daniel Lindsley]\n\n  This results in significantly faster slicing and reduced memory usage. The test suite is more complete and ensures this functionality better.\n\n  This also removes `load_all_queryset` from the main `SearchQuerySet` implementation. If you were relying on this behavior, you should use `RelatedSearchQuerySet` instead.\n- Log search queries with `DEBUG = True` for debugging purposes, similar\n  to what Django does. [Daniel Lindsley]\n- Updated LJ's Who Uses information. [Daniel Lindsley]\n- Added Sunlight Labs & NASA to the Who Uses list. [Daniel Lindsley]\n- Added Eldarion to the Who Uses list. [Daniel Lindsley]\n- When more of the cache is populated, provide a more accurate `len()`\n  of the `SearchQuerySet`. This ought to only affect advanced usages,\n  like excluding previously-registered models or `load_all_queryset`.\n  [Daniel Lindsley]\n- Fixed a bug where `SearchQuerySet`s longer than `REPR_OUTPUT_SIZE`\n  wouldn't include a note about truncation when `__repr__` is called.\n  [Daniel Lindsley]\n- Added the ability to choose which site is used when reindexing. Thanks\n  to SmileyChris for pointing this out and the original patch. [Daniel\n  Lindsley]\n- Fixed the lack of a `__unicode__` method on `SearchResult` objects.\n  Thanks to mint_xian for pointing this out. [Daniel Lindsley]\n- Typo'd the setup.py changes. Thanks to jlilly for catching that.\n  [Daniel Lindsley]\n- Converted all query strings to Unicode for Whoosh. Thanks to simonw108\n  for pointing this out. [Daniel Lindsley]\n- Added template tags to `setup.py`. Thanks to Bogdan for pointing this\n  out. [Daniel Lindsley]\n- Added two more tests to the Whoosh backend, just to make sure. [Daniel\n  Lindsley]\n- Corrected the way Whoosh handles `order_by`. Thanks to Rowan for\n  pointing this out. 
[Daniel Lindsley]\n- For the Whoosh backend, ensure the directory is writable by the\n  current user to try to prevent failed writes. [Daniel Lindsley]\n- Added a better label to the main search form field. [Daniel Lindsley]\n- Bringing the Whoosh backend up to version 0.3.0b14. This version of\n  Whoosh has better query parsing, faster indexing and, combined with\n  these changes, should cause fewer disruptions when used in a\n  multiprocess/multithreaded environment. [Daniel Lindsley]\n- Added optional argument to `spelling_suggestion` that lets you provide\n  a different query than the one built by the SearchQuerySet. [Daniel\n  Lindsley]\n\n  Useful for passing along a raw user-provided query, especially when there is a lot of post-processing done.\n- SearchResults now obey the type of data chosen in their corresponding\n  field in the SearchIndex if present. Thanks to evgenius for the\n  original report. [Daniel Lindsley]\n- Fixed a bug in the Solr backend where submitting an empty string to\n  search returned an ancient and incorrect datastructure. Thanks kapa77\n  for the report. [Daniel Lindsley]\n- Fixed a bug where the cache would never properly fill due to the\n  number of results returned being lower than the hit count. This could\n  happen when there were results excluded due to being in the index but\n  the model NOT being registered in the `SearchSite`. Thanks akrito and\n  tcline for the report. [Daniel Lindsley]\n- Altered the docs to look more like the main site. [Daniel Lindsley]\n- Added a (short) list of who uses Haystack. Would love to have more on\n  this list. [Daniel Lindsley]\n- Fixed docs on preparing data. Thanks fud. [Daniel Lindsley]\n- Added the `ModelSearchIndex` class for easier `SearchIndex`\n  generation. [Daniel Lindsley]\n- Added a note about using possibly unsafe data with `filter/exclude`.\n  Thanks to ryszard for pointing this out. [Daniel Lindsley]\n- Standardized the API on `date_facet`. 
Thanks to notanumber for the\n  original patch. [Daniel Lindsley]\n- Moved constructing the schema down to the `SearchBackend` level. This\n  allows more flexibility when creating a schema. [Daniel Lindsley]\n- Fixed a bug where a hyphen provided to `auto_query` could break the\n  query string. Thanks to ddanier for the report. [Daniel Lindsley]\n- BACKWARD INCOMPATIBLE - For consistency, `get_query_set` has been\n  renamed to `get_queryset` on `SearchIndex` classes. [Daniel Lindsley]\n\n  A simple search & replace to remove the underscore should be all that is needed.\n- Missed two bits while updating the documentation for the Xapian\n  backend. [Daniel Lindsley]\n- Updated documentation to add the Xapian backend information. A big\n  thanks to notanumber for all his hard work on the Xapian backend.\n  [Daniel Lindsley]\n- Added `EmptySearchQuerySet`. Thanks to askfor for the suggestion!\n  [Daniel Lindsley]\n- Added \"Best Practices\" documentation. [Daniel Lindsley]\n- Added documentation about the `HAYSTACK_SITECONF` setting. [Daniel\n  Lindsley]\n- Fixed erroneous documentation on Xapian not supporting boost. Thanks\n  notanumber! [Daniel Lindsley]\n- BACKWARD INCOMPATIBLE - The `haystack.autodiscover()` and other site\n  modifications now get their own configuration file and should no\n  longer be placed in the `ROOT_URLCONF`. Thanks to SmileyChris for the\n  original patch and patrys for further feedback. [Daniel Lindsley]\n- Added `verbose_name_plural` to the `SearchResult` object. [Daniel\n  Lindsley]\n- Added a warning about ordering by integers with the Whoosh backend.\n  [Daniel Lindsley]\n- Added a note about ordering and accented characters. [Daniel Lindsley]\n- Updated the `more_like_this` tag to allow for narrowing the models\n  returned by the tag. [Daniel Lindsley]\n- Fixed `null=True` for `IntegerField` and `FloatField`. Thanks to\n  ryszard for the report and original patch. 
[Daniel Lindsley]\n- Reverted aabdc9d4b98edc4735ed0c8b22aa09796c0a29ab as it would cause\n  mod_wsgi environments to fail in conjunction with the admin on Django\n  1.1. [Daniel Lindsley]\n- Added the start of a glossary of terminology. [Daniel Lindsley]\n- Various documentation fixes. Thanks to sk1p & notanumber. [Daniel\n  Lindsley]\n- The `haystack.autodiscover()` and other site modifications may now be\n  placed in ANY URLconf, not just the `ROOT_URLCONF`. Thanks to\n  SmileyChris for the original patch. [Daniel Lindsley]\n- Fixed invalid/empty pages in the SearchView. Thanks to joep and\n  SmileyChris for patches. [Daniel Lindsley]\n- Added a note and an exception about consistent fieldnames for the\n  document field across all `SearchIndex` classes. Thanks sk1p_! [Daniel\n  Lindsley]\n- Possible thread-safety fix related to registration handling. [Daniel\n  Lindsley]\n- BACKWARD INCOMPATIBLE - The 'boost' method no longer takes kwargs.\n  This makes boost a little more useful by allowing advanced terms.\n  [Daniel Lindsley]\n\n  To migrate code, convert multiple kwargs into separate 'boost' calls, quote what was the key and change the '=' to a ','.\n- Updated documentation to match behavioral changes to MLT. [Daniel\n  Lindsley]\n- Fixed a serious bug in MLT on Solr. Internals changed a bit and now\n  things work correctly. [Daniel Lindsley]\n- Removed erroneous 'zip_safe' from setup.py. Thanks ephelon. [Daniel\n  Lindsley]\n- Added `null=True` to fields, allowing you to ignore/skip a field when\n  indexing. Thanks to Kevin for the original patch. [Daniel Lindsley]\n- Fixed a standing test failure. The dummy setup can't do `load_all` due\n  to mocking. [Daniel Lindsley]\n- Added initial `additional_query` to MLT to allow for narrowing\n  results. [Daniel Lindsley]\n- Fixed nasty bug where results would get duplicated due to cached\n  results. [Daniel Lindsley]\n- Altered `ITERATOR_LOAD_PER_QUERY` from 20 to 10. 
[Daniel Lindsley]\n- Corrected tutorial when dealing with fields that have\n  `use_template=True`. [Daniel Lindsley]\n- Updated documentation to reflect basic Solr setup. [Daniel Lindsley]\n- Fix documentation on grabbing Whoosh and on the 'load_all' parameter\n  for SearchForms. [Daniel Lindsley]\n- Fixed bug where the '__in' filter wouldn't work with phrases or data\n  types other than one-word string/integer. [Daniel Lindsley]\n- Fixed bug so that the 'load_all' option in 'SearchView' now actually\n  does what it says it should. How embarrassing... [Daniel Lindsley]\n- Added ability to specify custom QuerySets for loading records via\n  'load_all'/'load_all_queryset'. [Daniel Lindsley]\n- Fixed a bug where results from non-registered models could appear in\n  the results. [Daniel Lindsley]\n- BACKWARD INCOMPATIBLE - Changed 'module_name' to 'model_name'\n  throughout Haystack related to SearchResult objects. Only incompatible\n  if you were relying on this attribute. [Daniel Lindsley]\n- Added the ability to fetch additional and stored fields from a\n  SearchResult as well as documentation on the SearchResult itself.\n  [Daniel Lindsley]\n- Added the ability to look through relations in SearchIndexes via '__'.\n  [Daniel Lindsley]\n- Added note about the 'text' fieldname convention. [Daniel Lindsley]\n- Added an 'update_object' and 'remove_object' to the SearchSite objects\n  as a shortcut. [Daniel Lindsley]\n- Recover gracefully from queries Whoosh judges to be invalid. [Daniel\n  Lindsley]\n- Missed test from previous commit. [Daniel Lindsley]\n- Added stemming support to Whoosh. [Daniel Lindsley]\n- Removed the commented version. [Daniel Lindsley]\n- Django 1.0.X compatibility fix for the reindex command. [Daniel\n  Lindsley]\n- Reindexes should now consume a lot less RAM. [Daniel Lindsley]\n\n  Evidently, when you run a ton of queries touching virtually everything in your DB, you need to clean out the \"logged\" queries from the connection. 
Sad but true.\n- Altered `SearchBackend.remove` and `SearchBackend.get_identifier` to\n  accept an object or a string identifier (in the event the object is no\n  longer available). [Daniel Lindsley]\n\n  This is useful in an environment where you no longer have the original object on hand and know what it is you wish to delete.\n- Added a simple (read: ghetto) way to run the test suite without having\n  to mess with settings. [Daniel Lindsley]\n- Added a setting `HAYSTACK_BATCH_SIZE` to control how many objects are\n  processed at once when running a reindex. [Daniel Lindsley]\n- Fixed import that was issuing a warning. [Daniel Lindsley]\n- Further tests to make sure `unregister` works appropriately as well,\n  just to be paranoid. [Daniel Lindsley]\n- Fixed a bizarre bug where backends may see a different site object\n  than the rest of the application code. THIS REQUIRES SEARCH &\n  REPLACING ALL INSTANCES OF `from haystack.sites import site` TO `from\n  haystack import site`. [Daniel Lindsley]\n\n  No changes needed if you've been using `haystack.autodiscover()`.\n- Pushed save/delete signal registration down to the SearchIndex level.\n  [Daniel Lindsley]\n\n  This should make it easier to alter how individual indexes are setup, allowing you to queue updates, prevent deletions, etc. The internal API changed slightly.\n- Created a default 'clean' implementation, as the first three (and soon\n  fourth) backends all use identical code. [Daniel Lindsley]\n- Updated tests to match new 'model_choices'. [Daniel Lindsley]\n- Added timeout support to Solr. [Daniel Lindsley]\n- Capitalize the Models in the model_choices. [Daniel Lindsley]\n- Removed unnecessary import. [Daniel Lindsley]\n- No longer need to watch for DEBUG in the 'haystack_info' command.\n  [Daniel Lindsley]\n- Fixed bug in Whoosh backend when spelling suggestions are disabled.\n  [Daniel Lindsley]\n- Added a \"clear_search_index\" management command. 
[Daniel Lindsley]\n- Removed comments as pysolr now supports timeouts and the other comment\n  no longer applies. [Daniel Lindsley]\n- Removed Solr-flavored schema bits. [Daniel Lindsley]\n\n  Still need to work out a better way to handle user created fields that don't fit neatly into subclassing one of the core Field types.\n- Moved informational messages to a management command to behave better\n  when using dumpdata or wsgi. [Daniel Lindsley]\n- Changed some Solr-specific field names. Requires a reindex. [Daniel\n  Lindsley]\n- Typo'd docstring. [Daniel Lindsley]\n- Removed empty test file from spelling testing. [Daniel Lindsley]\n- Documentation for getting spelling support working on Solr. [Daniel\n  Lindsley]\n- Initial spelling support added. [Daniel Lindsley]\n- Added a 'more_like_this' template tag. [Daniel Lindsley]\n- Removed an unnecessary 'run'. This cause MLT (and potentially\n  'raw_search') to fail by overwriting the results found. [Daniel\n  Lindsley]\n- Added Whoosh failure. Needs inspecting. [Daniel Lindsley]\n- Finally added views/forms documentation. A touch rough still. [Daniel\n  Lindsley]\n- Fixed a bug in FacetedSearchView where a SearchQuerySet method could\n  be called on an empty list instead. [Daniel Lindsley]\n- More faceting documentation. [Daniel Lindsley]\n- Started faceting documentation. [Daniel Lindsley]\n- Updated docs to finally include details about faceting. [Daniel\n  Lindsley]\n- Empty or one character searches in Whoosh returned the wrong data\n  structure. Thanks for catching this, silviogutierrez! [Daniel\n  Lindsley]\n- Added scoring to Whoosh now that 0.1.20+ support it. [Daniel Lindsley]\n- Fixed a bug in the Solr tests due to recent changes in pysolr. [Daniel\n  Lindsley]\n- Added documentation on the 'narrow' method. [Daniel Lindsley]\n- Added additional keyword arguments on raw_search. [Daniel Lindsley]\n- Added 'narrow' support in Whoosh. [Daniel Lindsley]\n- Fixed Whoosh backend's handling of pre-1900 dates. 
Thanks JoeGermuska!\n  [Daniel Lindsley]\n- Backed out the Whoosh quoted dates patch. [Daniel Lindsley]\n\n  Something still seems amiss in the Whoosh query parser, as ranges and dates together don't seem to get parsed together properly.\n- Added a small requirements section to the docs. [Daniel Lindsley]\n- Added notes about enabling the MoreLikeThisHandler within Solr.\n  [Daniel Lindsley]\n- Revised how tests are done so each backend now gets its own test app.\n  [Daniel Lindsley]\n\n  All tests pass once again.\n- Added 'startswith' filter. [Daniel Lindsley]\n- Fixed the __repr__ method on QueryFilters. Thanks JoeGermuska for the\n  original patch! [Daniel Lindsley]\n- BACKWARDS INCOMPATIBLE - Both the Solr & Whoosh backends now provide\n  native Python types back in SearchResults. [Daniel Lindsley]\n\n  This also allows Whoosh to use native types better from the 'SearchQuerySet' API itself.\n\n  This unfortunately will also require all Whoosh users to reindex, as the way some data (specifically datetimes/dates but applicable to others) is stored in the index.\n- SearchIndexes now support inheritance. Thanks smulloni! [Daniel\n  Lindsley]\n- Added FacetedSearchForm to make handling facets easier. [Daniel\n  Lindsley]\n- Heavily refactored the SearchView to take advantage of being a class.\n  [Daniel Lindsley]\n\n  It should now be much easier to override bits without having to copy-paste the entire __call__ method, which was more than slightly embarrassing before.\n- Fixed Solr backend so that it properly converts native Python types to\n  something Solr can handle. Thanks smulloni for the original patch!\n  [Daniel Lindsley]\n- SearchResults now include a verbose name for display purposes. [Daniel\n  Lindsley]\n- Fixed reverse order_by's when using Whoosh. Thanks matt_c for the\n  original patch. [Daniel Lindsley]\n- Handle Whoosh stopwords behavior when provided a single character\n  query string. 
[Daniel Lindsley]\n- Lightly refactored tests to only run engines with their own settings.\n  [Daniel Lindsley]\n- Typo'd the tutorial when setting up your own SearchSite. Thanks\n  mcroydon! [Daniel Lindsley]\n- Altered loading statements to only display when DEBUG is True. [Daniel\n  Lindsley]\n- Write to STDERR where appropriate. Thanks zerok for suggesting this\n  change. [Daniel Lindsley]\n- BACKWARD INCOMPATIBLE - Altered the search query param to 'q' instead\n  of 'query'. Thanks simonw for prompting this change. [Daniel Lindsley]\n- Removed the Whoosh patch in favor of better options. Please see the\n  documentation. [Daniel Lindsley]\n- Added Whoosh patch for 0.1.15 to temporarily fix reindexes. [Daniel\n  Lindsley]\n- Altered the reindex command to handle inherited models. Thanks\n  smulloni! [Daniel Lindsley]\n- Removed the no longer needed Whoosh patch. [Daniel Lindsley]\n\n  Whoosh users should upgrade to the latest Whoosh (0.1.15) as it fixes the issues that the patch covers as well as others.\n- Documented the 'content' shortcut. [Daniel Lindsley]\n- Fixed an incorrect bit of documentation on the default operator\n  setting. Thanks benspaulding! [Daniel Lindsley]\n- Added documentation about Haystack's various settings. [Daniel\n  Lindsley]\n- Corrected an issue with the Whoosh backend that can occur when no\n  indexes are registered. Now provides a better exception. [Daniel\n  Lindsley]\n- Documentation fixes. Thanks benspaulding! [Daniel Lindsley]\n- Fixed Whoosh patch, which should help with the \"KeyError\" exceptions\n  when searching with models. Thanks Matias Costa! [Daniel Lindsley]\n- Improvements to the setup.py. Thanks jezdez & ask! [Daniel Lindsley]\n- Fixed the .gitignore. Thanks ask! [Daniel Lindsley]\n- FacetedSearchView now inherits from SearchView. Thanks cyberdelia!\n  [Daniel Lindsley]\n\n  This will matter much more soon, as SearchView is going to be refactored to be more useful and extensible.\n- Documentation fixes. 
[Daniel Lindsley]\n- Altered the whoosh patch. Should apply cleanly now. [Daniel Lindsley]\n- Better linking to the search engine installation notes. [Daniel\n  Lindsley]\n- Added documentation on setting up the search engines. [Daniel\n  Lindsley]\n- Provide an exception when importing a backend dependency fails. Thanks\n  brosner for the initial patch. [Daniel Lindsley]\n- Yay stupid typos! [Daniel Lindsley]\n- Relicensing under BSD. Thanks matt_c for threatening to use my name in\n  an endorsement of a derived product! [Daniel Lindsley]\n- Fixed a bug in ModelSearchForm. Closes #1. Thanks dotsphinx! [Daniel\n  Lindsley]\n- Added link to pysolr binding. [Daniel Lindsley]\n- Refined documentation on preparing SearchIndex data. [Daniel Lindsley]\n- Changed existing references from 'model_name' to 'module_name'.\n  [Daniel Lindsley]\n\n  This was done to be consistent both internally and with Django. Thanks brosner!\n- Documentation improvements. Restyled and friendlier intro page.\n  [Daniel Lindsley]\n- Added documentation on preparing data. [Daniel Lindsley]\n- Additions and re-prioritizing the TODO list. [Daniel Lindsley]\n- Added warnings to Whoosh backend in place of silently ignoring\n  unsupported features. [Daniel Lindsley]\n- Corrected Xapian's capabilities. Thanks richardb! [Daniel Lindsley]\n- BACKWARD INCOMPATIBLE - Altered all settings to be prefixed with\n  HAYSTACK_. Thanks Collin! [Daniel Lindsley]\n- Test cleanup from previous commits. [Daniel Lindsley]\n- Changed the DEFAULT_OPERATOR back to 'AND'. Thanks richardb! [Daniel\n  Lindsley]\n- Altered the way registrations get handled. [Daniel Lindsley]\n- Various fixes. Thanks brosner! [Daniel Lindsley]\n- Added new 'should_update' method to documentation. [Daniel Lindsley]\n- Added 'should_update' method to SearchIndexes. [Daniel Lindsley]\n\n  This allows you to control, on a per-index basis, what conditions will cause an individual object to reindex. 
Useful for models that update frequently with changes that don't require indexing.\n- Added FAQ docs. [Daniel Lindsley]\n- Alter Whoosh backend to commit regardless. This avoids locking issues\n  that can occur on higher volume sites. [Daniel Lindsley]\n- A more efficient implementation of index clearing in Whoosh. [Daniel\n  Lindsley]\n- Added details about settings needed in settings.py. [Daniel Lindsley]\n- Added setup.py. Thanks cyberdelia for prompting it. [Daniel Lindsley]\n- Reindex management command now can reindex a limited range (like last\n  24 hours). Thanks traviscline. [Daniel Lindsley]\n- More things to do. [Daniel Lindsley]\n- Documentation formatting fixes. [Daniel Lindsley]\n- Added SearchBackend docs. [Daniel Lindsley]\n- Corrected reST formatting. [Daniel Lindsley]\n- Additional TODO's. [Daniel Lindsley]\n- Initial SearchIndex documentation. [Daniel Lindsley]\n- Formally introduced the TODO. [Daniel Lindsley]\n- Updated backend support list. [Daniel Lindsley]\n- Added initial documentation for SearchSites. [Daniel Lindsley]\n- Changed whoosh backend to fix limiting sets. Need to revisit someday.\n  [Daniel Lindsley]\n- Added patch for Whoosh backend and version notes in documentation.\n  [Daniel Lindsley]\n- Initial Whoosh backend complete. [Daniel Lindsley]\n\n  Does not yet support highlighting or scoring.\n- Removed some unnecessary dummy code. [Daniel Lindsley]\n- Work on trying to get the default site to load reliably in all cases.\n  [Daniel Lindsley]\n- Trimmed down the urls for tests now that the dummy backend works\n  correctly. [Daniel Lindsley]\n- Dummy now correctly loads the right SearchBackend. [Daniel Lindsley]\n- Removed faceting from the default SearchView. [Daniel Lindsley]\n- Refactored tests so they are no longer within the haystack app.\n  [Daniel Lindsley]\n\n  Further benefits include less mocking and haystack's tests no longer contributing overall testing of end-user apps. Documentation included.\n- Removed old comment. 
[Daniel Lindsley]\n- Fixed a potential race condition. Also, since there's no way to tell\n  when everything is ready to go in Django, adding an explicit call to\n  SearchQuerySet's __init__ to force the site to load if it hasn't\n  already. [Daniel Lindsley]\n- More tests on models() support. [Daniel Lindsley]\n- Pulled schema building out into the site to leverage across backends.\n  [Daniel Lindsley]\n- Altered backend loading for consistency with Django and fixed the\n  long-incorrect-for-non-obvious-and-tedious-reasons version number.\n  Still beta but hopefully that changes soon. [Daniel Lindsley]\n- Missed a spot when fixing SearchSites. [Daniel Lindsley]\n- BACKWARD INCOMPATIBLE - Created a class name conflict during the last\n  change (double use of ``SearchIndex``). Renamed original\n  ``SearchIndex`` to ``SearchSite``, which is slightly more correct\n  anyhow. [Daniel Lindsley]\n\n  This will only affect you if you've custom built sites (i.e. not used ``autodiscover()``.\n- More documentation. Started docs on SearchQuery. [Daniel Lindsley]\n- Further fleshed out SearchQuerySet documentation. [Daniel Lindsley]\n- BACKWARD INCOMPATIBLE (2 of 2) - Altered autodiscover to search for\n  'search_indexes.py' instead of 'indexes.py' to prevent collisions and\n  be more descriptive. [Daniel Lindsley]\n- BACKWARD INCOMPATIBLE (1 of 2) - The ModelIndex class has been renamed\n  to be SearchIndex to make room for future improvements. [Daniel\n  Lindsley]\n- Fleshed out a portion of the SearchQuerySet documentation. [Daniel\n  Lindsley]\n- SearchQuerySet.auto_query now supports internal quoting for exact\n  matches. [Daniel Lindsley]\n- Fixed semi-serious issue with SearchQuery objects, causing bits to\n  leak from one query to the next when cloning. [Daniel Lindsley]\n- Altered Solr port for testing purposes. [Daniel Lindsley]\n- Now that Solr and core feature set are solid, moved haystack into beta\n  status. 
[Daniel Lindsley]\n- Added simple capabilities for retrieving facets back. [Daniel\n  Lindsley]\n- Bugfix to make sure model choices don't get loaded until after the\n  IndexSite is populated. [Daniel Lindsley]\n- Initial faceting support complete. [Daniel Lindsley]\n- Query facets tested. [Daniel Lindsley]\n- Bugfix to (field) facets. [Daniel Lindsley]\n\n  Using a dict is inappropriate, as the output from Solr\n  is sorted by count. Now using a two-tuple.\n- Backward-incompatible changes to faceting. Date-based faceting is now\n  present. [Daniel Lindsley]\n- Solr implementation of faceting started. Needs more tests. [Daniel\n  Lindsley]\n- Initial faceting support in place. Needs more thought and a Solr\n  implementation. [Daniel Lindsley]\n- Unbreak iterables in queries. [Daniel Lindsley]\n- Bugfixes for Unicode handling and loading deleted models. [Daniel\n  Lindsley]\n- Fixed bug in Solr's run method. [Daniel Lindsley]\n- Various bug fixes. [Daniel Lindsley]\n- Backward-Incompatible: Refactored ModelIndexes to allow greater\n  customization before indexing. See \"prepare()\" methods. [Daniel\n  Lindsley]\n- Updated \"build_solr_schema\" command for revised fields. [Daniel\n  Lindsley]\n- Refactored SearchFields. Lightly backwards-incompatible. [Daniel\n  Lindsley]\n- No more duplicates from the \"build_solr_schema\" management command.\n  [Daniel Lindsley]\n- Removed the kwargs. Explicit is better than implicit. [Daniel\n  Lindsley]\n- Tests for highlighting. [Daniel Lindsley]\n- Added initial highlighting support. Needs tests and perhaps a better\n  implementation. [Daniel Lindsley]\n- Started \"build_solr_schema\" command. Needs testing with more than one\n  index. [Daniel Lindsley]\n- Argh. \".select_related()\" is killing reindexes. Again. [Daniel\n  Lindsley]\n- Stored fields now come back as part of the search result. [Daniel\n  Lindsley]\n- Fixed Solr's SearchQuery.clean to handle reserved words more\n  appropriately. 
[Daniel Lindsley]\n- Filter types seem solid and have tests. [Daniel Lindsley]\n- App renamed (for namespace/sanity/because it's really different\n  reasons). [Daniel Lindsley]\n- Started trying to support the various filter types. Needs testing and\n  verification. [Daniel Lindsley]\n- Fixed tests in light of the change to \"OR\". [Daniel Lindsley]\n- Readded \"select_related\" to reindex command. [Daniel Lindsley]\n- I am a moron. [Daniel Lindsley]\n- \"OR\" is now the default operator. Also, \"auto_query\" now handles\n  not'ed keywords. [Daniel Lindsley]\n- \"More Like This\" now implemented and functioning with Solr backend.\n  [Daniel Lindsley]\n- Removed broken references to __name__. [Daniel Lindsley]\n- Internal documentation fix. [Daniel Lindsley]\n- Solr backend can now clear on a per-model basis. [Daniel Lindsley]\n- Solr backend tests fleshed out. Initial stability of Solr. [Daniel\n  Lindsley]\n\n  This needs more work (as does everything) but it seems to be working reliably from my testing (both unit and \"real-world\"). Onward and upward.\n- Massive renaming/refactoring spree. Tests 100% passing again. [Daniel\n  Lindsley]\n- Renamed BaseSearchQuerySet to SearchQuerySet. Now requires\n  instantiation. [Daniel Lindsley]\n- Standardizing syntax. [Daniel Lindsley]\n- Backend support update. [Daniel Lindsley]\n- An attempt to make sure the main IndexSite is always setup, even\n  outside web requests. Also needs improvement. [Daniel Lindsley]\n- Reindexes now work. [Daniel Lindsley]\n- Some painful bits to make things work for now. Needs improvement.\n  [Daniel Lindsley]\n- Support kwargs on the search. [Daniel Lindsley]\n- Move solr backend tests in prep for fully testing the backend. [Daniel\n  Lindsley]\n- Some ContentField/StoredField improvements. [Daniel Lindsley]\n\n  StoredFields now have a unique template per field (as they should have from the start) and there's a touch more checking. 
You can also now override the template name for either type of field.\n- Fixed backend loading upon unpickling SearchBackend. [Daniel Lindsley]\n- Tweak internal doc. [Daniel Lindsley]\n- MOAR DOCS. [Daniel Lindsley]\n- Internal documentation and cleanup. Also alters the behavior of\n  SearchQuerySet's \"order_by\" method slightly, bringing it more in-line\n  with QuerySet's behavior. [Daniel Lindsley]\n- Documentation/license updates. [Daniel Lindsley]\n- Fixed ModelIndexes and created tests for them. 100% tests passing\n  again. [Daniel Lindsley]\n- Started refactoring ModelIndexes. Needs tests (and possibly a little\n  love). [Daniel Lindsley]\n- Implemented Solr's boost, clean, multiple order-by. Fixed Solr's score\n  retrieval (depends on custom pysolr) and exact match syntax. [Daniel\n  Lindsley]\n- Minor changes/cleanup. [Daniel Lindsley]\n- Updated docs and a FIXME. [Daniel Lindsley]\n- SearchView/SearchForm tests passing. [Daniel Lindsley]\n- Changed BaseSearchQuery to accept a SearchBackend instance instead of\n  the class. [Daniel Lindsley]\n- Better dummy implementation, a bugfix to raw_search and\n  SearchView/SearchForm tests. [Daniel Lindsley]\n- Temporarily changed the Solr backend to ignore fields. Pysolr will\n  need a patch and then reenable this. [Daniel Lindsley]\n- Merge branch 'master' of\n  ssh://daniel@mckenzie/home/daniel/djangosearch_refactor into HEAD.\n  [Daniel Lindsley]\n- Started SearchView tests and added URLconf. [Daniel Lindsley]\n- Started SearchView tests and added URLconf. [Daniel Lindsley]\n- Added note about basic use. Needs refactoring. [Matt Croydon]\n- Merged index.rst. [Matt Croydon]\n- Fixed result lookups when constructing a SearchResult. [Daniel\n  Lindsley]\n- Added more docs. [Daniel Lindsley]\n- Added FIXME for exploration on Solr backend. [Daniel Lindsley]\n- Solr's SearchQuery now handles phrases (exact match). [Daniel\n  Lindsley]\n- More work on the Solr backend. 
[Daniel Lindsley]\n- Added more imports for future test coverage. [Daniel Lindsley]\n- Added stubs for backend tests. [Daniel Lindsley]\n- Documentation updates. [Daniel Lindsley]\n- Refactored forms/views. Needs tests. [Daniel Lindsley]\n- Removed old entries in .gitignore. [Daniel Lindsley]\n- Implemented load_all. [Daniel Lindsley]\n- Fixed query result retrieval. [Daniel Lindsley]\n- Updated documentation index and tweaked overview formatting. [Matt\n  Croydon]\n- Slight docs improvements. [Daniel Lindsley]\n- Started work on Solr backend. [Daniel Lindsley]\n- Ignore _build. [Matt Croydon]\n- Refactored documentation to format better in Sphinx. [Matt Croydon]\n- Added _build to .gitignore. [Matt Croydon]\n- Added sphinx config for documentation. [Matt Croydon]\n- Verified _fill_cache behavior. 100% test pass. [Daniel Lindsley]\n- Added a couple new desirable bits of functionality. Mostly stubbed.\n  [Daniel Lindsley]\n- Removed fixme and updated docs. [Daniel Lindsley]\n- Removed an old reference to SearchPaginator. [Daniel Lindsley]\n- Updated import paths to new backend Base* location. [Daniel Lindsley]\n- Relocated base backend classes to __init__.py for consistency with\n  Django. [Daniel Lindsley]\n- BaseSearchQuerySet initial API complete and all but working. One\n  failing test related to caching results. [Daniel Lindsley]\n- Added new (improved?) template path for index templates. [Daniel\n  Lindsley]\n- Removed SearchPaginator, as it no longer provides anything over the\n  standard Django Paginator. [Daniel Lindsley]\n- Added len/iter support to BaseSearchQuerySet. Need to finish getitem\n  support and test. [Daniel Lindsley]\n- Started to update ModelIndex. [Daniel Lindsley]\n- Started to alter dummy to match new class names/API. [Daniel Lindsley]\n- Little bits of cleanup. [Daniel Lindsley]\n- Added overview of where functionality belongs in djangosearch. 
This\n  should likely make it's way into other docs and go away eventually.\n  [Daniel Lindsley]\n- BaseSearchQuery now tracks filters via QueryFilter objects. Tests\n  complete for QueryFilter and nearly complete for BaseSearchQuery.\n  [Daniel Lindsley]\n- Started docs on creating new backends. [Daniel Lindsley]\n- Started tests for BaseSearchQuery and BaseSearchQuerySet. [Daniel\n  Lindsley]\n- Fixed site loading. [Daniel Lindsley]\n- More work on the Base* classes. [Daniel Lindsley]\n- Started docs on creating new backends. [Daniel Lindsley]\n- Yet more work on BaseSearchQuerySet. Now with fewer FIXMEs. [Daniel\n  Lindsley]\n- More work on BaseSearchQuerySet and added initial BaseSearchQuery\n  object. [Daniel Lindsley]\n- Removed another chunk of SearchPaginator as SearchQuerySet becomes\n  more capable. Hopefully, SearchPaginator will simply go away soon.\n  [Daniel Lindsley]\n- Fixed ModelSearchForm to check the site's registered models. [Daniel\n  Lindsley]\n- Reenabled how other backends might load. [Daniel Lindsley]\n- Added ignores. [Daniel Lindsley]\n- Started documenting what backends are supported and what they can do.\n  [Daniel Lindsley]\n- More work on SearchQuerySet. [Daniel Lindsley]\n- More renovation and IndexSite's tests pass 100%. [Daniel Lindsley]\n- Fleshed out sites tests. Need to setup environment in order to run\n  them. [Daniel Lindsley]\n- Started adding tests. [Daniel Lindsley]\n- First blush at SearchQuerySet. Non-functional, trying to lay out API\n  and basic funationality. [Daniel Lindsley]\n- Removed old results.py in favor of the coming SearchQuerySet. [Daniel\n  Lindsley]\n- Noted future improvements on SearchPaginator. [Daniel Lindsley]\n- Removed old reference to autodiscover and added default site a la NFA.\n  [Daniel Lindsley]\n- Commented another use of RELEVANCE. [Daniel Lindsley]\n- Little backend tweaks. [Daniel Lindsley]\n- Added autodiscover support. [Daniel Lindsley]\n- Readded management command. 
[Daniel Lindsley]\n- Added SearchView and ModelSearchForm back in. Needs a little work.\n  [Daniel Lindsley]\n- Readded results. Need to look at SoC for ideas. [Daniel Lindsley]\n- Readded paginator. Needs docs/tests. [Daniel Lindsley]\n- Readded core backends + solr. Will add others as they reach 100%\n  functionality. [Daniel Lindsley]\n- Added ModelIndex back in. Customized to match new setup. [Daniel\n  Lindsley]\n- Added signal registration as well as some introspection capabilities.\n  [Daniel Lindsley]\n- Initial commit. Basic IndexSite implementation complete. Needs tests.\n  [Daniel Lindsley]\n"
  },
  {
    "path": "docs/conf.py",
    "content": "#\n# Haystack documentation build configuration file, created by\n# sphinx-quickstart on Wed Apr 15 08:50:46 2009.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.append(os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = \"toc\"\n\n# General information about the project.\nproject = \"Haystack\"\ncopyright = \"2009-2016, Daniel Lindsley\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\n# The short X.Y version.\nversion = \"2.5\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"2.5.0\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = [\"_build\"]\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\n# html_theme = 'haystack_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  
For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {\n#     \"rightsidebar\": \"true\",\n#     \"bodyfont\": \"'Helvetica Neue', Arial, sans-serif\",\n#     \"sidebarbgcolor\": \"#303c0c\",\n#     \"sidebartextcolor\": \"#effbcb\",\n#     \"sidebarlinkcolor\": \"#eef7ab\",\n#     \"relbarbgcolor\": \"#caecff\",\n#     \"relbartextcolor\": \"#262511\",\n#     \"relbarlinkcolor\": \"#262511\",\n#     \"footerbgcolor\": \"#262511\",\n# }\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = [\".\"]\n\n# The name for this set of Sphinx documents.  If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_use_modindex = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Haystackdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n    (\"index\", \"Haystack.tex\", \"Haystack Documentation\", \"Daniel Lindsley\", \"manual\")\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n# latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_use_modindex = True\n"
  },
  {
    "path": "docs/contributing.rst",
    "content": "============\nContributing\n============\n\nHaystack is open-source and, as such, grows (or shrinks) & improves in part\ndue to the community. Below are some guidelines on how to help with the project.\n\n\nPhilosophy\n==========\n\n* Haystack is BSD-licensed. All contributed code must be either\n\n  * the original work of the author, contributed under the BSD, or...\n  * work taken from another project released under a BSD-compatible license.\n\n* GPL'd (or similar) works are not eligible for inclusion.\n* Haystack's git master branch should always be stable, production-ready &\n  passing all tests.\n* Major releases (1.x.x) are commitments to backward-compatibility of the public APIs.\n  Any documented API should ideally not change between major releases.\n  The exclusion to this rule is in the event of either a security issue\n  or to accommodate changes in Django itself.\n* Minor releases (x.3.x) are for the addition of substantial features or major\n  bugfixes.\n* Patch releases (x.x.4) are for minor features or bugfixes.\n\n\nGuidelines For Reporting An Issue/Feature\n=========================================\n\nSo you've found a bug or have a great idea for a feature. Here's the steps you\nshould take to help get it added/fixed in Haystack:\n\n* First, check to see if there's an existing issue/pull request for the\n  bug/feature. All issues are at https://github.com/toastdriven/django-haystack/issues\n  and pull reqs are at https://github.com/toastdriven/django-haystack/pulls.\n* If there isn't one there, please file an issue. The ideal report includes:\n\n  * A description of the problem/suggestion.\n  * How to recreate the bug.\n  * If relevant, including the versions of your:\n\n    * Python interpreter\n    * Django\n    * Haystack\n    * Search engine used (as well as bindings)\n    * Optionally of the other dependencies involved\n\n  * Ideally, creating a pull request with a (failing) test case demonstrating\n    what's wrong. 
This makes it easy for us to reproduce & fix the problem.\n    Instructions for running the tests are at :doc:`index`\n\nYou might also hop into the IRC channel (``#haystack`` on ``irc.freenode.net``)\n& raise your question there, as there may be someone who can help you with a\nwork-around.\n\n\nGuidelines For Contributing Code\n================================\n\nIf you're ready to take the plunge & contribute back some code/docs, the\nprocess should look like:\n\n* Fork the project on GitHub into your own account.\n* Clone your copy of Haystack.\n* Make a new branch in git & commit your changes there.\n* Push your new branch up to GitHub.\n* Again, ensure there isn't already an issue or pull request out there on it.\n  If there is & you feel you have a better fix, please take note of the issue\n  number & mention it in your pull request.\n* Create a new pull request (based on your branch), including what the\n  problem/feature is, versions of your software & referencing any related\n  issues/pull requests.\n\nIn order to be merged into Haystack, contributions must have the following:\n\n* A solid patch that:\n\n  * is clear.\n  * works across all supported versions of Python/Django.\n  * follows the existing style of the code base formatted with\n    isort_ and Black_ using the provided configuration in the repo\n  * comments included as needed.\n\n* A test case that demonstrates the previous flaw that now passes\n  with the included patch.\n* If it adds/changes a public API, it must also include documentation\n  for those changes.\n* Must be appropriately licensed (see \"Philosophy\").\n* Adds yourself to the AUTHORS file.\n\nIf your contribution lacks any of these things, they will have to be added\nby a core contributor before being merged into Haystack proper, which may take\nsubstantial time for the all-volunteer team to get to.\n\n.. _isort: https://pypi.org/project/isort/\n.. 
_Black: https://pypi.org/project/black/\n\nGuidelines For Core Contributors\n================================\n\nIf you've been granted the commit bit, here's how to shepherd the changes in:\n\n* Any time you go to work on Haystack, please use ``git pull --rebase`` to fetch\n  the latest changes.\n* Any new features/bug fixes must meet the above guidelines for contributing\n  code (solid patch/tests passing/docs included).\n* Commits are typically cherry-picked onto a branch off master.\n\n  * This is done so as not to include extraneous commits, as some people submit\n    pull reqs based on their git master that has other things applied to it.\n\n* A set of commits should be squashed down to a single commit.\n\n  * ``git merge --squash`` is a good tool for performing this, as is\n    ``git rebase -i HEAD~N``.\n  * This is done to prevent anyone using the git repo from accidentally pulling\n    work-in-progress commits.\n\n* Commit messages should use past tense, describe what changed & thank anyone\n  involved. Examples::\n\n    \"\"\"Added support for the latest version of Whoosh (v2.3.2).\"\"\"\n    \"\"\"Fixed a bug in ``solr_backend.py``. Thanks to joeschmoe for the report!\"\"\"\n    \"\"\"BACKWARD-INCOMPATIBLE: Altered the arguments passed to ``SearchBackend``.\n\n    Further description appears here if the change warrants an explanation\n    as to why it was done.\"\"\"\n\n* For any patches applied from a contributor, please ensure their name appears\n  in the AUTHORS file.\n* When closing issues or pull requests, please reference the SHA in the closing\n  message (i.e. ``Thanks! Fixed in SHA: 6b93f6``). GitHub will automatically\n  link to it.\n"
  },
  {
    "path": "docs/creating_new_backends.rst",
    "content": ".. _ref-creating-new-backends:\n\n=====================\nCreating New Backends\n=====================\n\nThe process should be fairly simple.\n\n#. Create new backend file. Name is important.\n#. Two classes inside.\n\n   #. SearchBackend (inherit from haystack.backends.BaseSearchBackend)\n   #. SearchQuery (inherit from haystack.backends.BaseSearchQuery)\n\n\nSearchBackend\n=============\n\nResponsible for the actual connection and low-level details of interacting with\nthe backend.\n\n* Connects to search engine\n* Method for saving new docs to index\n* Method for removing docs from index\n* Method for performing the actual query\n\n\nSearchQuery\n===========\n\nResponsible for taking structured data about the query and converting it into a\nbackend appropriate format.\n\n* Method for creating the backend specific query - ``build_query``.\n"
  },
  {
    "path": "docs/debugging.rst",
    "content": ".. ref-debugging:\n\n==================\nDebugging Haystack\n==================\n\nThere are some common problems people run into when using Haystack for the first\ntime. Some of the common problems and things to try appear below.\n\n.. note::\n\n    As a general suggestion, your best friend when debugging an issue is to\n    use the ``pdb`` library included with Python. By dropping a\n    ``import pdb; pdb.set_trace()`` in your code before the issue occurs, you\n    can step through and examine variable/logic as you progress through. Make\n    sure you don't commit those ``pdb`` lines though.\n\n\n\"No module named haystack.\"\n===========================\n\nThis problem usually occurs when first adding Haystack to your project.\n\n* Are you using the ``haystack`` directory within your ``django-haystack``\n  checkout/install?\n* Is the ``haystack`` directory on your ``PYTHONPATH``? Alternatively, is\n  ``haystack`` symlinked into your project?\n* Start a Django shell (``./manage.py shell``) and try ``import haystack``.\n  You may receive a different, more descriptive error message.\n* Double-check to ensure you have no circular imports. (i.e. module A tries\n  importing from module B which is trying to import from module A.)\n\n\n\"No results found.\" (On the web page)\n=====================================\n\nSeveral issues can cause no results to be found. 
Most commonly it is either\nnot running a ``rebuild_index`` to populate your index or having a blank\n``document=True`` field, resulting in no content for the engine to search on.\n\n* Do you have a ``search_indexes.py`` located within an installed app?\n* Do you have data in your database?\n* Have you run a ``./manage.py rebuild_index`` to index all of your content?\n* Try running ``./manage.py rebuild_index -v2`` for more verbose output to\n  ensure data is being processed/inserted.\n* Start a Django shell (``./manage.py shell``) and try::\n\n  >>> from haystack.query import SearchQuerySet\n  >>> sqs = SearchQuerySet().all()\n  >>> sqs.count()\n\n* You should get back an integer > 0. If not, check the above and reindex.\n\n  >>> sqs[0] # Should get back a SearchResult object.\n  >>> sqs[0].id # Should get something back like 'myapp.mymodel.1'.\n  >>> sqs[0].text # ... or whatever your document=True field is.\n\n* If you get back either ``u''`` or ``None``, it means that your data isn't\n  making it into the main field that gets searched. You need to check that the\n  field either has a template that uses the model data, a ``model_attr`` that\n  pulls data directly from the model or a ``prepare/prepare_FOO`` method that\n  populates the data at index time.\n* Check the template for your search page and ensure it is looping over the\n  results properly. Also ensure that it's either accessing valid fields coming\n  back from the search engine or that it's trying to access the associated\n  model via the ``{{ result.object.foo }}`` lookup.\n\n\n\"LockError: [Errno 17] File exists: '/path/to/whoosh_index/_MAIN_LOCK'\"\n=======================================================================\n\nThis is a Whoosh-specific traceback. It occurs when the Whoosh engine in one\nprocess/thread locks the index files for writing while another process/thread\ntries to access them. 
This is a common error when using ``RealtimeSignalProcessor``\nwith Whoosh under any kind of load, which is why it's only recommended for\nsmall sites or development.\n\nThe only real solution is to set up a cron job that runs\n``./manage.py rebuild_index`` (optionally with ``--age=24``) that runs nightly\n(or however often you need) to refresh the search indexes. Then disable the\nuse of the ``RealtimeSignalProcessor`` within your settings.\n\nThe downside to this is that you lose real-time search. For many people, this\nisn't an issue and this will allow you to scale Whoosh up to a much higher\ntraffic. If this is not acceptable, you should investigate either the Solr or\nXapian backends.\n\n\n\"Failed to add documents to Solr: [Reason: None]\"\n=================================================\n\nThis is a Solr-specific traceback. It generally occurs when there is an error\nwith your ``HAYSTACK_CONNECTIONS[<alias>]['URL']``. Since Solr acts as a webservice, you should\ntest the URL in your web browser. If you receive an error, you may need to\nchange your URL.\n\nThis can also be caused when using old versions of pysolr (2.0.9 and before) with httplib2 and\nincluding a trailing slash in your ``HAYSTACK_CONNECTIONS[<alias>]['URL']``. If this applies to\nyou, please upgrade to the current version of pysolr.\n\n\n\"Got an unexpected keyword argument 'boost'\"\n============================================\n\nThis is a Solr-specific traceback. This can also be caused when using old\nversions of pysolr (2.0.12 and before). Please upgrade your version of\npysolr (2.0.13+).\n"
  },
  {
    "path": "docs/faceting.rst",
    "content": ".. _ref-faceting:\n\n========\nFaceting\n========\n\nWhat Is Faceting?\n-----------------\n\nFaceting is a way to provide users with feedback about the number of documents\nwhich match terms they may be interested in. At its simplest, it gives\ndocument counts based on words in the corpus, date ranges, numeric ranges or\neven advanced queries.\n\nFaceting is particularly useful when trying to provide users with drill-down\ncapabilities. The general workflow in this regard is:\n\n  #. You can choose what you want to facet on.\n  #. The search engine will return the counts it sees for that match.\n  #. You display those counts to the user and provide them with a link.\n  #. When the user chooses a link, you narrow the search query to only include\n     those conditions and display the results, potentially with further facets.\n\n.. note::\n\n    Faceting can be difficult, especially in providing the user with the right\n    number of options and/or the right areas to be able to drill into. This\n    is unique to every situation and demands following what real users need.\n    \n    You may want to consider logging queries and looking at popular terms to\n    help you narrow down how you can help your users.\n\nHaystack provides functionality so that all of the above steps are possible.\nFrom the ground up, let's build a faceted search setup. This assumes that you \nhave been to work through the :doc:`tutorial` and have a working Haystack\ninstallation. The same setup from the :doc:`tutorial` applies here.\n\n1. Determine Facets And ``SearchQuerySet``\n------------------------------------------\n\nDetermining what you want to facet on isn't always easy. For our purposes,\nwe'll facet on the ``author`` field.\n\nIn order to facet effectively, the search engine should store both a standard\nrepresentation of your data as well as exact version to facet on. This is\ngenerally accomplished by duplicating the field and storing it via two\ndifferent types. 
Duplication is suggested so that those fields are still\nsearchable in the standard ways.\n\nTo inform Haystack of this, you simply pass along a ``faceted=True`` parameter\non the field(s) you wish to facet on. So to modify our existing example::\n\n    class NoteIndex(SearchIndex, indexes.Indexable):\n        text = CharField(document=True, use_template=True)\n        author = CharField(model_attr='user', faceted=True)\n        pub_date = DateTimeField(model_attr='pub_date')\n\nHaystack quietly handles all of the backend details for you, creating a similar\nfield to the type you specified with ``_exact`` appended. Our example would now\nhave both a ``author`` and ``author_exact`` field, though this is largely an\nimplementation detail.\n\nTo pull faceting information out of the index, we'll use the\n``SearchQuerySet.facet`` method to setup the facet and the\n``SearchQuerySet.facet_counts`` method to retrieve back the counts seen.\n\nExperimenting in a shell (``./manage.py shell``) is a good way to get a feel\nfor what various facets might look like::\n\n    >>> from haystack.query import SearchQuerySet\n    >>> sqs = SearchQuerySet().facet('author')\n    >>> sqs.facet_counts()\n    {\n        'dates': {},\n        'fields': {\n            'author': [\n                ('john', 4),\n                ('daniel', 2),\n                ('sally', 1),\n                ('terry', 1),\n            ],\n        },\n        'queries': {}\n    }\n\n.. note::\n\n    Note that, despite the duplication of fields, you should provide the\n    regular name of the field when faceting. Haystack will intelligently\n    handle the underlying details and mapping.\n\nAs you can see, we get back a dictionary which provides access to the three\ntypes of facets available: ``fields``, ``dates`` and ``queries``. Since we only\nfaceted on the ``author`` field (which actually facets on the ``author_exact``\nfield managed by Haystack), only the ``fields`` key has any data\nassociated with it. 
In this case, we have a corpus of eight documents with four\nunique authors.\n\n.. note::\n    Facets are chainable, like most ``SearchQuerySet`` methods. However, unlike\n    most ``SearchQuerySet`` methods, they are *NOT* affected by ``filter`` or\n    similar methods. The only method that has any effect on facets is the\n    ``narrow`` method (which is how you provide drill-down).\n\nConfiguring facet behaviour\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nYou can configure the behaviour of your facets by passing options\nfor each facet in your SearchQuerySet. These options can be backend specific.\n\n**limit**\n*tested on Solr*\n\nThe ``limit`` parameter limits the results for each query. On Solr, the default `facet.limit`_ is 100 and a\nnegative number removes the limit.\n\n.. _facet.limit: https://wiki.apache.org/solr/SimpleFacetParameters#facet.limit\n\nExample usage::\n\n    >>> from haystack.query import SearchQuerySet\n    >>> sqs = SearchQuerySet().facet('author', limit=-1)\n    >>> sqs.facet_counts()\n    {\n        'dates': {},\n        'fields': {\n            'author': [\n                ('abraham', 1),\n                ('benny', 2),\n                ('cindy', 1),\n                ('diana', 5),\n            ],\n        },\n        'queries': {}\n    }\n\n    >>> sqs = SearchQuerySet().facet('author', limit=2)\n    >>> sqs.facet_counts()\n    {\n        'dates': {},\n        'fields': {\n            'author': [\n                ('abraham', 1),\n                ('benny', 2),\n            ],\n        },\n        'queries': {}\n    }\n\n**sort**\n*tested on Solr*\n\nThe ``sort`` parameter will sort the results for each query. Solr's default\n`facet.sort`_ is ``index``, which will sort the facets alphabetically. Changing\nthe parameter to ``count`` will sort the facets by the number of results for\neach facet value.\n\n.. 
_facet.sort: https://wiki.apache.org/solr/SimpleFacetParameters#facet.sort\n\n\nExample usage::\n\n    >>> from haystack.query import SearchQuerySet\n    >>> sqs = SearchQuerySet().facet('author', sort='index', )\n    >>> sqs.facet_counts()\n    {\n        'dates': {},\n        'fields': {\n            'author': [\n                ('abraham', 1),\n                ('benny', 2),\n                ('cindy', 1),\n                ('diana', 5),\n            ],\n        },\n        'queries': {}\n    }\n\n    >>> sqs = SearchQuerySet().facet('author', sort='count', )\n    >>> sqs.facet_counts()\n    {\n        'dates': {},\n        'fields': {\n            'author': [\n                ('diana', 5),\n                ('benny', 2),\n                ('abraham', 1),\n                ('cindy', 1),\n            ],\n        },\n        'queries': {}\n    }\n\n\nNow that we have the facet we want, it's time to implement it.\n\n2. Switch to the ``FacetedSearchView`` and ``FacetedSearchForm``\n----------------------------------------------------------------\n\nThere are three things that we'll need to do to expose facets to our frontend.\nThe first is construct the ``SearchQuerySet`` we want to use. We should have\nthat from the previous step. The second is to switch to the\n``FacetedSearchView``. This view is useful because it prepares the facet counts\nand provides them in the context as ``facets``.\n\nOptionally, the third step is to switch to the ``FacetedSearchForm``. As it\ncurrently stands, this is only useful if you want to provide drill-down, though\nit may provide more functionality in the future. We'll do it for the sake of\nhaving it in place but know that it's not required.\n\nIn your URLconf, you'll need to switch to the ``FacetedSearchView``. 
Your\nURLconf should resemble::\n\n    from django.conf.urls import url\n    from haystack.forms import FacetedSearchForm\n    from haystack.views import FacetedSearchView\n    \n    \n    urlpatterns = [\n        url(r'^$', FacetedSearchView(form_class=FacetedSearchForm, facet_fields=['author']), name='haystack_search'),\n    ]\n\nThe ``FacetedSearchView`` will now instantiate the ``FacetedSearchForm``.\nThe specified ``facet_fields`` will be present in the context variable\n``facets``. This is added in an overridden ``extra_context`` method.\n\n\n3. Display The Facets In The Template\n-------------------------------------\n\nTemplating facets involves simply adding an extra bit of processing to display\nthe facets (and optionally to link to provide drill-down). An example template\nmight look like this::\n\n    <form method=\"get\" action=\".\">\n        <table>\n            <tbody>\n                {{ form.as_table }}\n                <tr>\n                    <td>&nbsp;</td>\n                    <td><input type=\"submit\" value=\"Search\"></td>\n                </tr>\n            </tbody>\n        </table>\n    </form>\n    \n    {% if query %}\n        <!-- Begin faceting. -->\n        <h2>By Author</h2>\n    \n        <div>\n            <dl>\n                {% if facets.fields.author %}\n                    <dt>Author</dt>\n                    {# Provide only the top 5 authors #}\n                    {% for author in facets.fields.author|slice:\":5\" %}\n                        <dd><a href=\"{{ request.get_full_path }}&amp;selected_facets=author_exact:{{ author.0|urlencode }}\">{{ author.0 }}</a> ({{ author.1 }})</dd>\n                    {% endfor %}\n                {% else %}\n                    <p>No author facets.</p>\n                {% endif %}\n            </dl>\n        </div>\n        <!-- End faceting -->\n    \n        <!-- Display results... 
-->\n        {% for result in page.object_list %}\n            <div class=\"search_result\">\n                <h3><a href=\"{{ result.object.get_absolute_url }}\">{{ result.object.title }}</a></h3>\n            \n                <p>{{ result.object.body|truncatewords:80 }}</p>\n            </div>\n        {% empty %}\n            <p>Sorry, no results found.</p>\n        {% endfor %}\n    {% endif %}\n\nDisplaying the facets is a matter of looping through the facets you want and\nproviding the UI to suit. The ``author.0`` is the facet text from the backend\nand the ``author.1`` is the facet count.\n\n4. Narrowing The Search\n-----------------------\n\nWe've also set ourselves up for the last bit, the drill-down aspect. By\nappending on the ``selected_facets`` to the URLs, we're informing the\n``FacetedSearchForm`` that we want to narrow our results to only those\ncontaining the author we provided.\n\nFor a concrete example, if the facets on author come back as::\n\n    {\n        'dates': {},\n        'fields': {\n            'author': [\n                ('john', 4),\n                ('daniel', 2),\n                ('sally', 1),\n                ('terry', 1),\n            ],\n        },\n        'queries': {}\n    }\n\nYou should present a list similar to::\n\n    <ul>\n        <li><a href=\"/search/?q=Haystack&selected_facets=author_exact:john\">john</a> (4)</li>\n        <li><a href=\"/search/?q=Haystack&selected_facets=author_exact:daniel\">daniel</a> (2)</li>\n        <li><a href=\"/search/?q=Haystack&selected_facets=author_exact:sally\">sally</a> (1)</li>\n        <li><a href=\"/search/?q=Haystack&selected_facets=author_exact:terry\">terry</a> (1)</li>\n    </ul>\n\n.. warning::\n\n    Haystack can automatically handle most details around faceting. However,\n    since ``selected_facets`` is passed directly to narrow, it must use the\n    duplicated field name. 
Improvements to this are planned but incomplete.\n\nThis is simply the default behavior but it is possible to override or provide\nyour own form which does additional processing. You could also write your own\nfaceted ``SearchView``, which could provide additional/different facets based\non facets chosen. There is a wide range of possibilities available to help the\nuser navigate your content.\n"
  },
  {
    "path": "docs/faq.rst",
    "content": ".. _ref-frequently-asked-questions:\n\n==============================\n(In)Frequently Asked Questions\n==============================\n\n\nWhat is Haystack?\n=================\n\nHaystack is meant to be a portable interface to a search engine of your choice.\nSome might call it a search framework, an abstraction layer or what have you.\nThe idea is that you write your search code once and should be able to freely\nswitch between backends as your situation necessitates.\n\n\nWhy should I consider using Haystack?\n=====================================\n\nHaystack is targeted at the following use cases:\n\n* If you want to feature search on your site and search solutions like Google or\n  Yahoo search don't fit your needs.\n* If you want to be able to customize your search and search on more than just\n  the main content.\n* If you want to have features like drill-down (faceting) or \"More Like This\".\n* If you want a interface that is non-search engine specific, allowing you to\n  change your mind later without much rewriting.\n\n\nWhen should I not be using Haystack?\n====================================\n\n* Non-Model-based data. If you just want to index random data (flat files,\n  alternate sources, etc.), Haystack isn't a good solution. Haystack is very \n  ``Model``-based and doesn't work well outside of that use case.\n* Ultra-high volume. Because of the very nature of Haystack (abstraction layer),\n  there's more overhead involved. This makes it portable, but as with all\n  abstraction layers, you lose a little performance. You also can't take full\n  advantage of the exact feature-set of your search engine. This is the price\n  of pluggable backends.\n\n\nWhy was Haystack created when there are so many other search options?\n=====================================================================\n\nThe proliferation of search options in Django is a relatively recent development\nand is actually one of the reasons for Haystack's existence. 
There are too\nmany options that are only partial solutions or are too engine specific.\n\nFurther, most use an unfamiliar API and documentation is lacking in most cases.\n\nHaystack is an attempt to unify these efforts into one solution. That's not to\nsay there should be no alternatives, but Haystack should provide a good\nsolution to 80%+ of the search use cases out there.\n\n\nWhat's the history behind Haystack?\n===================================\n\nHaystack started because of my frustration with the lack of good search options\n(before many other apps came out) and as the result of extensive use of\nDjangosearch. Djangosearch was a decent solution but had a number of\nshortcomings, such as:\n\n* Tied to the models.py, so you'd have to modify the source of third-party (\n  or django.contrib) apps in order to effectively use it.\n* All or nothing approach to indexes. So all indexes appear on all sites and\n  in all places.\n* Lack of tests.\n* Lack of documentation.\n* Uneven backend implementations.\n\nThe initial idea was to simply fork Djangosearch and improve on these (and\nother issues). However, after stepping back, I decided to overhaul the entire\nAPI (and most of the underlying code) to be more representative of what I would\nwant as an end-user. The result was starting afresh and reusing concepts (and\nsome code) from Djangosearch as needed.\n\nAs a result of this heritage, you can actually still find some portions of\nDjangosearch present in Haystack (especially in the ``SearchIndex`` and\n``SearchBackend`` classes) where it made sense. The original authors of\nDjangosearch are aware of this and thus far have seemed to be fine with this\nreuse.\n\n\nWhy doesn't <search engine X> have a backend included in Haystack?\n==================================================================\n\nSeveral possibilities on this.\n\n#. 
Licensing\n\n   A common problem is that the Python bindings for a specific engine may\n   have been released under an incompatible license. The goal is for Haystack\n   to remain BSD licensed and importing bindings with an incompatible license\n   can technically convert the entire codebase to that license. This most\n   commonly occurs with GPL'ed bindings.\n\n#. Lack of time\n\n   The search engine in question may be on the list of backends to add and we\n   simply haven't gotten to it yet. We welcome patches for additional backends.\n\n#. Incompatible API\n\n   In order for an engine to work well with Haystack, a certain baseline set of\n   features is needed. This is often an issue when the engine doesn't support\n   ranged queries or additional attributes associated with a search record.\n\n#. We're not aware of the engine\n\n   If you think we may not be aware of the engine you'd like, please tell us\n   about it (preferably via the group - \n   http://groups.google.com/group/django-haystack/). Be sure to check through\n   the backends (in case it wasn't documented) and search the history on the\n   group to minimize duplicates.\n"
  },
  {
    "path": "docs/glossary.rst",
    "content": ".. _ref-glossary:\n\n========\nGlossary\n========\n\nSearch is a domain full of its own jargon and definitions. As this may be an\nunfamiliar territory to many developers, what follows are some commonly used\nterms and what they mean.\n\n\nEngine\n  An engine, for the purposes of Haystack, is a third-party search solution.\n  It might be a full service (i.e. Solr_) or a library to build an\n  engine with (i.e. Whoosh_)\n\n.. _Solr: http://lucene.apache.org/solr/\n.. _Whoosh: https://github.com/mchaput/whoosh/\n\nIndex\n  The datastore used by the engine is called an index. Its structure can vary\n  wildly between engines but commonly they resemble a document store. This is\n  the source of all information in Haystack.\n\nDocument\n  A document is essentially a record within the index. It usually contains at\n  least one blob of text that serves as the primary content the engine searches\n  and may have additional data hung off it.\n\nCorpus\n  A term for a collection of documents. When talking about the documents stored\n  by the engine (rather than the technical implementation of the storage), this\n  term is commonly used.\n\nField\n  Within the index, each document may store extra data with the main content as\n  a field. Also sometimes called an attribute, this usually represents metadata\n  or extra content about the document. Haystack can use these fields for\n  filtering and display.\n\nTerm\n  A term is generally a single word (or word-like) string of characters used\n  in a search query.\n\nStemming\n  A means of determining if a word has any root words. This varies by language,\n  but in English, this generally consists of removing plurals, an action form of\n  the word, et cetera. For instance, in English, 'giraffes' would stem to\n  'giraffe'. Similarly, 'exclamation' would stem to 'exclaim'. 
This is useful\n  for finding variants of the word that may appear in other documents.\n\nBoost\n  Boost provides a means to take a term or phrase from a search query and alter\n  the relevance of a result based on if that term is found in the result, a form\n  of weighting. For instance, if you wanted to more heavily weight results that\n  included the word 'zebra', you'd specify a boost for that term within the\n  query.\n\nMore Like This\n  Incorporating techniques from information retrieval and artificial\n  intelligence, More Like This is a technique for finding other documents within\n  the index that closely resemble the document in question. This is useful for\n  programmatically generating a list of similar content for a user to browse\n  based on the current document they are viewing.\n\nFaceting\n  Faceting is a way to provide insight to the user into the contents of your\n  corpus. In its simplest form, it is a set of document counts returned with\n  results when performing a query. These counts can be used as feedback for\n  the user, allowing the user to choose interesting aspects of their search\n  results and \"drill down\" into those results.\n\n  An example might be providing a facet on an ``author`` field, providing back a\n  list of authors and the number of documents in the index they wrote. This\n  could be presented to the user with a link, allowing the user to click and\n  narrow their original search to all results by that author.\n"
  },
  {
    "path": "docs/haystack_theme/layout.html",
    "content": "{% extends \"basic/layout.html\" %}\n\n{%- block extrahead %}\n    <link rel=\"stylesheet\" href=\"http://haystacksearch.org/css/front.css\" media=\"screen\">\n    <link rel=\"stylesheet\" href=\"_static/documentation.css\" media=\"screen\">\n{% endblock %}\n\n{%- block header %}\n    <div id=\"header\">\n        <h1>Haystack</h1>\n        <p>Modular search for Django</p>\n\n        <ul class=\"features\">\n          <li>Term Boost</li>\n          <li>More Like This</li>\n          <li>Faceting</li>\n          <li>Stored (non-indexed) fields</li>\n          <li>Highlighting</li>\n          <li>Spelling Suggestions</li>\n        </ul>\n    </div>\n{% endblock %}"
  },
  {
    "path": "docs/haystack_theme/static/documentation.css",
    "content": "a, a:link, a:hover { background-color: transparent !important; color: #CAECFF; outline-color: transparent !important; text-decoration: underline; }\ndl dt { text-decoration: underline; }\ndl.class dt, dl.method dt { background-color: #444444; padding: 5px; text-decoration: none; }\ntt.descname { font-weight: normal; }\ndl.method dt span.optional { font-weight: normal; }\ndiv#header { margin-bottom: 0px; }\ndiv.document, div.related, div.footer { width: 900px; margin: 0 auto; }\ndiv.document { margin-top: 10px; }\ndiv.related { background-color: #262511; padding-left: 10px; padding-right: 10px; }\ndiv.documentwrapper { width:640px; float:left;}\ndiv.body h1,\ndiv.body h2,\ndiv.body h3,\ndiv.body h4,\ndiv.body h5,\ndiv.body h6 {\n    background-color: #053211;\n    font-weight: normal;\n    border-bottom: 2px solid #262511;\n    margin: 20px -20px 10px -20px;\n    padding: 3px 0 3px 10px;\n}\ndiv.sphinxsidebar { width:220px; float:right;}\ndiv.sphinxsidebar ul { padding-left: 10px; }\ndiv.sphinxsidebar ul ul { padding-left: 10px; margin-left: 10px; }\ndiv.bodywrapper { margin: 0px; }\ndiv.highlight-python, div.highlight { background-color: #262511; margin-bottom: 10px; padding: 10px; }\ndiv.footer { background-color:#262511; font-size: 90%; padding: 10px; }\ntable thead { background-color: #053211; border-bottom: 1px solid #262511; }"
  },
  {
    "path": "docs/haystack_theme/theme.conf",
    "content": "[theme]\ninherit = basic"
  },
  {
    "path": "docs/highlighting.rst",
    "content": ".. _ref-highlighting:\n\n============\nHighlighting\n============\n\nHaystack supports two different methods of highlighting. You can either use\n``SearchQuerySet.highlight`` or the built-in ``{% highlight %}`` template tag,\nwhich uses the ``Highlighter`` class. Each approach has advantages and\ndisadvantages you need to weigh when deciding which to use.\n\nIf you want portable, flexible, decently fast code, the\n``{% highlight %}`` template tag (or manually using the underlying\n``Highlighter`` class) is the way to go. On the other hand, if you care more\nabout speed and will only ever be using one backend,\n``SearchQuerySet.highlight`` may suit your needs better.\n\nUse of ``SearchQuerySet.highlight`` is documented in the\n:doc:`searchqueryset_api` documentation and the ``{% highlight %}`` tag is\ncovered in the :doc:`templatetags` documentation, so the rest of this material\nwill cover the ``Highlighter`` implementation.\n\n\n``Highlighter``\n---------------\n\nThe ``Highlighter`` class is a pure-Python implementation included with Haystack\nthat's designed for flexibility. If you use the ``{% highlight %}`` template\ntag, you'll be automatically using this class. You can also use it manually in\nyour code. For example::\n\n    >>> from haystack.utils.highlighting import Highlighter\n\n    >>> my_text = 'This is a sample block that would be more meaningful in real life.'\n    >>> my_query = 'block meaningful'\n\n    >>> highlight = Highlighter(my_query)\n    >>> highlight.highlight(my_text)\n    u'...<span class=\"highlighted\">block</span> that would be more <span class=\"highlighted\">meaningful</span> in real life.'\n\nThe default implementation takes three optional kwargs: ``html_tag``,\n``css_class`` and ``max_length``. 
These allow for basic customizations to the\noutput, like so::\n\n    >>> from haystack.utils.highlighting import Highlighter\n\n    >>> my_text = 'This is a sample block that would be more meaningful in real life.'\n    >>> my_query = 'block meaningful'\n\n    >>> highlight = Highlighter(my_query, html_tag='div', css_class='found', max_length=35)\n    >>> highlight.highlight(my_text)\n    u'...<div class=\"found\">block</div> that would be more <div class=\"found\">meaningful</div>...'\n\nFurther, if this implementation doesn't suit your needs, you can define your own\ncustom highlighter class. As long as it implements the API you've just seen, it\ncan highlight however you choose. For example::\n\n    # In ``myapp/utils.py``...\n    from haystack.utils.highlighting import Highlighter\n\n    class BorkHighlighter(Highlighter):\n        def render_html(self, highlight_locations=None, start_offset=None, end_offset=None):\n            highlighted_chunk = self.text_block[start_offset:end_offset]\n\n            for word in self.query_words:\n                highlighted_chunk = highlighted_chunk.replace(word, 'Bork!')\n\n            return highlighted_chunk\n\nThen set the ``HAYSTACK_CUSTOM_HIGHLIGHTER`` setting to\n``myapp.utils.BorkHighlighter``. Usage would then look like::\n\n    >>> highlight = BorkHighlighter(my_query)\n    >>> highlight.highlight(my_text)\n    u'Bork! that would be more Bork! in real life.'\n\nNow the ``{% highlight %}`` template tag will also use this highlighter.\n"
  },
  {
    "path": "docs/index.rst",
    "content": "Welcome to Haystack!\n====================\n\nHaystack provides modular search for Django. It features a unified, familiar\nAPI that allows you to plug in different search backends (such as Solr_,\nElasticsearch_, Whoosh_, Xapian_, etc.) without having to modify your code.\n\n.. _Solr: http://lucene.apache.org/solr/\n.. _Elasticsearch: http://elasticsearch.org/\n.. _Whoosh: https://github.com/mchaput/whoosh/\n.. _Xapian: http://xapian.org/\n\n\n.. note::\n\n    This documentation represents the current version of Haystack. For old versions of the documentation:\n\n    * v2.5.X: https://django-haystack.readthedocs.io/en/v2.5.1/\n    * v2.4.X: https://django-haystack.readthedocs.io/en/v2.4.1/\n    * v2.3.X: https://django-haystack.readthedocs.io/en/v2.3.0/\n    * v2.2.X: https://django-haystack.readthedocs.io/en/v2.2.0/\n    * v2.1.X: https://django-haystack.readthedocs.io/en/v2.1.0/\n    * v2.0.X: https://django-haystack.readthedocs.io/en/v2.0.0/\n    * v1.2.X: https://django-haystack.readthedocs.io/en/v1.2.7/\n    * v1.1.X: https://django-haystack.readthedocs.io/en/v1.1/\n\nGetting Started\n---------------\n\nIf you're new to Haystack, you may want to start with these documents to get\nyou up and running:\n\n.. toctree::\n   :maxdepth: 2\n\n   tutorial\n\n.. toctree::\n   :maxdepth: 1\n\n   views_and_forms\n   templatetags\n   glossary\n   management_commands\n   faq\n   who_uses\n   other_apps\n   installing_search_engines\n   debugging\n\n   changelog\n   contributing\n   python3\n   migration_from_1_to_2\n\n\nAdvanced Uses\n-------------\n\nOnce you've got Haystack working, here are some of the more complex features\nyou may want to include in your application.\n\n.. 
toctree::\n   :maxdepth: 1\n\n   best_practices\n   highlighting\n   faceting\n   autocomplete\n   boost\n   signal_processors\n   multiple_index\n   rich_content_extraction\n   spatial\n   admin\n\n\nReference\n---------\n\nIf you're an experienced user and are looking for a reference, you may be\nlooking for API documentation and advanced usage as detailed in:\n\n.. toctree::\n   :maxdepth: 2\n\n   searchqueryset_api\n   searchindex_api\n   inputtypes\n   searchfield_api\n   searchresult_api\n   searchquery_api\n   searchbackend_api\n\n   architecture_overview\n   backend_support\n   settings\n   utils\n\n\nDeveloping\n----------\n\nFinally, if you're looking to help out with the development of Haystack,\nthe following links should help guide you on running tests and creating\nadditional backends:\n\n.. toctree::\n   :maxdepth: 1\n\n   running_tests\n   creating_new_backends\n\n\nRequirements\n------------\n\nHaystack has a relatively easily-met set of requirements.\n\n* Python 2.7+ or Python 3.3+\n* A supported version of Django: https://www.djangoproject.com/download/#supported-versions\n\nAdditionally, each backend has its own requirements. You should refer to\n:doc:`installing_search_engines` for more details.\n"
  },
  {
    "path": "docs/inputtypes.rst",
    "content": ".. _ref-inputtypes:\n\n===========\nInput Types\n===========\n\nInput types allow you to specify more advanced query behavior. They serve as a\nway to alter the query, often in backend-specific ways, without altering your\nPython code; as well as enabling use of more advanced features.\n\nInput types currently are only useful with the ``filter/exclude`` methods on\n``SearchQuerySet``. Expanding this support to other methods is on the roadmap.\n\n\nAvailable Input Types\n=====================\n\nIncluded with Haystack are the following input types:\n\n``Raw``\n-------\n\n.. class:: haystack.inputs.Raw\n\nRaw allows you to specify backend-specific query syntax. If Haystack doesn't\nprovide a way to access special query functionality, you can make use of this\ninput type to pass it along.\n\nExample::\n\n    # Fielded.\n    sqs = SearchQuerySet().filter(author=Raw('daniel OR jones'))\n\n    # Non-fielded.\n    # See ``AltParser`` for a better way to construct this.\n    sqs = SearchQuerySet().filter(content=Raw('{!dismax qf=author mm=1}haystack'))\n\n\n``Clean``\n---------\n\n.. class:: haystack.inputs.Clean\n\n``Clean`` takes standard user (untrusted) input and sanitizes it. It ensures\nthat no unintended operators or special characters make it into the query.\n\nThis is roughly analogous to Django's ``autoescape`` support.\n\n.. note::\n\n    By default, if you hand a ``SearchQuerySet`` a bare string, it will get\n    wrapped in this class.\n\nExample::\n\n    # This becomes \"daniel or jones\".\n    sqs = SearchQuerySet().filter(content=Clean('daniel OR jones'))\n\n    # Things like ``:`` & ``/`` get escaped.\n    sqs = SearchQuerySet().filter(url=Clean('http://www.example.com'))\n\n    # Equivalent (automatically wrapped in ``Clean``).\n    sqs = SearchQuerySet().filter(url='http://www.example.com')\n\n\n``Exact``\n---------\n\n.. 
class:: haystack.inputs.Exact\n\n``Exact`` allows for making sure a phrase is exactly matched, unlike the usual\n``AND`` lookups, where words may be far apart.\n\nExample::\n\n    sqs = SearchQuerySet().filter(author=Exact('n-gram support'))\n\n    # Equivalent.\n    sqs = SearchQuerySet().filter(author__exact='n-gram support')\n\n\n``Not``\n-------\n\n.. class:: haystack.inputs.Not\n\n``Not`` allows negation of the query fragment it wraps. As ``Not`` is a subclass\nof ``Clean``, it will also sanitize the query.\n\nThis is generally only used internally. Most people prefer to use the\n``SearchQuerySet.exclude`` method.\n\nExample::\n\n    sqs = SearchQuerySet().filter(author=Not('daniel'))\n\n\n``AutoQuery``\n-------------\n\n.. class:: haystack.inputs.AutoQuery\n\n``AutoQuery`` takes a more complex user query (that includes simple, standard\nquery syntax bits) & forms a proper query out of them. It also handles\nsanitizing that query using ``Clean`` to ensure the query doesn't break.\n\n``AutoQuery`` accommodates for handling regular words, NOT-ing words &\nextracting exact phrases.\n\nExample::\n\n    # Against the main text field with an accidental \":\" before \"search\".\n    # Generates a query like ``haystack (NOT whoosh) \"fast search\"``\n    sqs = SearchQuerySet().filter(content=AutoQuery('haystack -whoosh \"fast :search\"'))\n\n    # Equivalent.\n    sqs = SearchQuerySet().auto_query('haystack -whoosh \"fast :search\"')\n\n    # Fielded.\n    sqs = SearchQuerySet().filter(author=AutoQuery('daniel -day -lewis'))\n\n\n``AltParser``\n-------------\n\n.. class:: haystack.inputs.AltParser\n\n``AltParser`` lets you specify that a portion of the query should use a\nseparate parser in the search engine. 
This is search-engine-specific, so it may\ndecrease the portability of your app.\n\nCurrently only supported under Solr.\n\nExample::\n\n    # DisMax.\n    sqs = SearchQuerySet().filter(content=AltParser('dismax', 'haystack', qf='text', mm=1))\n\n    # Prior to the spatial support, you could do...\n    sqs = SearchQuerySet().filter(content=AltParser('dismax', 'haystack', qf='author', mm=1))\n\n\nCreating Your Own Input Types\n=============================\n\nBuilding your own input type is relatively simple. All input types are simple\nclasses that provide an ``__init__`` & a ``prepare`` method.\n\nThe ``__init__`` may accept any ``args/kwargs``, though the typical use usually\njust involves a query string.\n\nThe ``prepare`` method lets you alter the query the user provided before it\nbecomes part of the main query. It is lazy, called as late as possible, right before\nthe final query is built & shipped to the engine.\n\nA full, if somewhat silly, example looks like::\n\n    from haystack.inputs import Clean\n\n\n    class NoShoutCaps(Clean):\n        input_type_name = 'no_shout_caps'\n        # This is the default & doesn't need to be specified.\n        post_process = True\n\n        def __init__(self, query_string, **kwargs):\n            # Stash the original, if you need it.\n            self.original = query_string\n            super(NoShoutCaps, self).__init__(query_string, **kwargs)\n\n        def prepare(self, query_obj):\n            # We need a reference to the current ``SearchQuery`` object this\n            # will run against, in case we need backend-specific code.\n            query_string = super(NoShoutCaps, self).prepare(query_obj)\n\n            # Take that, capital letters!\n            return query_string.lower()\n"
  },
  {
    "path": "docs/installing_search_engines.rst",
    "content": ".. _ref-installing-search-engines:\n\n=========================\nInstalling Search Engines\n=========================\n\nSolr\n====\n\nOfficial Download Location: http://www.apache.org/dyn/closer.cgi/lucene/solr/\n\nSolr is Java but comes in a pre-packaged form that requires very little other\nthan the JRE and Jetty. It's very performant and has an advanced featureset.\nHaystack suggests using Solr 6.x, though it's possible to get it working on\nSolr 4.x+ with a little effort. Installation is relatively simple:\n\nFor Solr 6.X::\n\n    curl -LO https://archive.apache.org/dist/lucene/solr/X.Y.0/solr-X.Y.0.tgz\n    mkdir solr\n    tar -C solr -xf solr-X.Y.0.tgz --strip-components=1\n    cd solr\n    ./bin/solr start                                    # start solr\n    ./bin/solr create -c tester -n basic_config         # create core named 'tester'\n\nBy default this will create a core with a managed schema.  This setup is dynamic\nbut not useful for haystack, and we'll need to configure solr to use a static\n(classic) schema.  Haystack can generate a viable schema.xml and solrconfig.xml\nfor you from your application and reload the core for you (once Haystack is\ninstalled and setup).  To do this run:\n``./manage.py build_solr_schema --configure-directory=<CoreConfigDir>\n--reload-core``. In this example CoreConfigDir is something like\n``../solr-6.5.0/server/solr/tester/conf``, and ``--reload-core``\nis what triggers reloading of the core.  Please refer to ``build_solr_schema``\nin the :doc:`management_commands` for required configuration.\n\nFor Solr 4.X::\n\n    curl -LO https://archive.apache.org/dist/lucene/solr/4.10.2/solr-4.10.2.tgz\n    tar xvzf solr-4.10.2.tgz\n    cd solr-4.10.2\n    cd example\n    java -jar start.jar\n\nYou’ll need to revise your schema. You can generate this from your application\n(once Haystack is installed and setup) by running\n``./manage.py build_solr_schema``. 
Take the output from that command and place\nit in ``solr-4.10.2/example/solr/collection1/conf/schema.xml``. Then restart\nSolr.\n\n.. warning::\n    Please note; the template filename, the file YOU supply under\n    TEMPLATE_DIR/search_configuration has changed to schema.xml from solr.xml.\n    The previous template name solr.xml was a legacy holdover from older\n    versions of solr.\n\nYou'll also need to install the ``pysolr`` client library from PyPI::\n\n    $ pip install pysolr\n\nMore Like This\n--------------\n\nOn Solr 6.X+ \"More Like This\" functionality is enabled by default. To enable \nthe \"More Like This\" functionality on earlier versions of Solr, you'll need\nto enable the ``MoreLikeThisHandler``. Add the following line to your\n``solrconfig.xml`` file within the ``config`` tag::\n\n    <requestHandler name=\"/mlt\" class=\"solr.MoreLikeThisHandler\" />\n\nSpelling Suggestions\n--------------------\n\nTo enable the spelling suggestion functionality in Haystack, you'll need to\nenable the ``SpellCheckComponent``.\n\nThe first thing to do is create a special field on your ``SearchIndex`` class\nthat mirrors the ``text`` field, but uses ``FacetCharField``. This disables\nthe post-processing that Solr does, which can mess up your suggestions.\nSomething like the following is suggested::\n\n    class MySearchIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        # ... 
normal fields then...\n        suggestions = indexes.FacetCharField()\n\n        def prepare(self, obj):\n            prepared_data = super(MySearchIndex, self).prepare(obj)\n            prepared_data['suggestions'] = prepared_data['text']\n            return prepared_data\n\nThen, you enable it in Solr by adding the following line to your\n``solrconfig.xml`` file within the ``config`` tag::\n\n    <searchComponent name=\"spellcheck\" class=\"solr.SpellCheckComponent\">\n    \n      <str name=\"queryAnalyzerFieldType\">text_general</str>\n      <lst name=\"spellchecker\">\n        <str name=\"name\">default</str>\n        <str name=\"field\">text</str>\n        <str name=\"classname\">solr.DirectSolrSpellChecker</str>\n        <str name=\"distanceMeasure\">internal</str>\n        <float name=\"accuracy\">0.5</float>\n        <int name=\"maxEdits\">2</int>\n        <int name=\"minPrefix\">1</int>\n        <int name=\"maxInspections\">5</int>\n        <int name=\"minQueryLength\">4</int>\n        <float name=\"maxQueryFrequency\">0.01</float>\n      </lst>\n    </searchComponent>\n\nThen change your default handler from::\n\n    <requestHandler name=\"/select\" class=\"solr.SearchHandler\">\n      <lst name=\"defaults\">\n        <str name=\"echoParams\">explicit</str>\n        <int name=\"rows\">10</int>\n      </lst>\n    </requestHandler>\n    \n... 
to ...::\n\n    <requestHandler name=\"/select\" class=\"solr.SearchHandler\">\n      <lst name=\"defaults\">\n        <str name=\"echoParams\">explicit</str>\n        <int name=\"rows\">10</int>\n      \n        <str name=\"spellcheck.dictionary\">default</str>\n        <str name=\"spellcheck\">on</str>\n        <str name=\"spellcheck.extendedResults\">true</str>\n        <str name=\"spellcheck.count\">10</str>\n        <str name=\"spellcheck.alternativeTermCount\">5</str>\n        <str name=\"spellcheck.maxResultsForSuggest\">5</str>\n        <str name=\"spellcheck.collate\">true</str>\n        <str name=\"spellcheck.collateExtendedResults\">true</str>\n        <str name=\"spellcheck.maxCollationTries\">10</str>\n        <str name=\"spellcheck.maxCollations\">5</str>\n       </lst>\n       <arr name=\"last-components\">\n         <str>spellcheck</str>\n       </arr>\n    </requestHandler>\n\nBe warned that the ``<str name=\"field\">suggestions</str>`` portion will be specific to\nyour ``SearchIndex`` classes (in this case, assuming the main field is called\n``text``).\n\n\nElasticsearch\n=============\n\nElasticsearch is similar to Solr — another Java application using Lucene — but\nfocused on ease of deployment and clustering. See\nhttps://www.elastic.co/products/elasticsearch for more information.\n\nHaystack currently supports Elasticsearch 1.x, 2.x, and 5.x.\n\nFollow the instructions on https://www.elastic.co/downloads/elasticsearch to\ndownload and install Elasticsearch and configure it for your environment.\n\nYou'll also need to install the Elasticsearch binding: elasticsearch_ for the\nappropriate backend version — for example::\n\n    $ pip install \"elasticsearch>=5,<6\"\n\n.. 
_elasticsearch: https://pypi.python.org/pypi/elasticsearch/\n\n\nWhoosh\n======\n\nOfficial Download Location: https://github.com/whoosh-community/whoosh\n\nWhoosh is pure Python, so it's a great option for getting started quickly and\nfor development, though it does work for small scale live deployments. The\ncurrent recommended version is 1.3.1+. You can install via PyPI_ using\n``sudo easy_install whoosh`` or ``sudo pip install whoosh``.\n\nNote that, while capable otherwise, the Whoosh backend does not currently\nsupport \"More Like This\" or faceting. Support for these features has recently\nbeen added to Whoosh itself & may be present in a future release.\n\n.. _PyPI: http://pypi.python.org/pypi/Whoosh/\n\n\nXapian\n======\n\nOfficial Download Location: http://xapian.org/download\n\nXapian is written in C++ so it requires compilation (unless your OS has a\npackage for it). Installation looks like::\n\n    curl -O http://oligarchy.co.uk/xapian/1.2.18/xapian-core-1.2.18.tar.xz\n    curl -O http://oligarchy.co.uk/xapian/1.2.18/xapian-bindings-1.2.18.tar.xz\n\n    unxz xapian-core-1.2.18.tar.xz\n    unxz xapian-bindings-1.2.18.tar.xz\n\n    tar xvf xapian-core-1.2.18.tar\n    tar xvf xapian-bindings-1.2.18.tar\n\n    cd xapian-core-1.2.18\n    ./configure\n    make\n    sudo make install\n\n    cd ..\n    cd xapian-bindings-1.2.18\n    ./configure\n    make\n    sudo make install\n\nXapian is a third-party supported backend. It is not included in Haystack\nproper due to licensing. To use it, you need both Haystack itself as well as\n``xapian-haystack``. You can download the source from\nhttp://github.com/notanumber/xapian-haystack/tree/master. Installation\ninstructions can be found on that page as well. The backend, written\nby David Sauve (notanumber), fully implements the `SearchQuerySet` API and is\nan excellent alternative to Solr.\n"
  },
  {
    "path": "docs/management_commands.rst",
    "content": ".. _ref-management-commands:\n\n===================\nManagement Commands\n===================\n\nHaystack comes with several management commands to make working with Haystack\neasier.\n\n\n``clear_index``\n===============\n\nThe ``clear_index`` command wipes out your entire search index. Use with\ncaution. In addition to the standard management command options, it accepts the\nfollowing arguments:\n\n    ``--noinput``:\n        If provided, the interactive prompts are skipped and the index is\n        unceremoniously wiped out.\n    ``--verbosity``:\n        Accepted but ignored.\n    ``--using``:\n        Update only the named backend (can be used multiple times). By default,\n        all backends will be updated.\n    ``--nocommit``:\n        If provided, it will pass commit=False to the backend.  This means that the\n        update will not become immediately visible and will depend on another explicit commit\n        or the backend's commit strategy to complete the update.\n\nBy default, this is an **INTERACTIVE** command and assumes that you do **NOT**\nwish to delete the entire index.\n\n.. note::\n\n    The ``--nocommit`` argument is only supported by the Solr backend.\n\n.. warning::\n\n  Depending on the backend you're using, this may simply delete the entire\n  directory, so be sure your ``HAYSTACK_CONNECTIONS[<alias>]['PATH']`` setting is correctly\n  pointed at just the index directory.\n\n\n``update_index``\n================\n\n.. note::\n\n    If you use the ``--start/--end`` flags on this command, you'll need to\n    install dateutil_ to handle the datetime parsing.\n\n    .. _dateutil: http://pypi.python.org/pypi/python-dateutil/1.5\n\nThe ``update_index`` command will freshen all of the content in your index. It\niterates through all indexed models and updates the records in the index. 
In\naddition to the standard management command options, it accepts the following\narguments:\n\n    ``--age``:\n        Number of hours back to consider objects new. Useful for nightly\n        reindexes (``--age=24``). Requires ``SearchIndexes`` to implement\n        the ``get_updated_field`` method. Default is ``None``.\n    ``--start``:\n        The start date for indexing within. Can be any dateutil-parsable string,\n        recommended to be YYYY-MM-DDTHH:MM:SS. Requires ``SearchIndexes`` to\n        implement the ``get_updated_field`` method. Default is ``None``.\n    ``--end``:\n        The end date for indexing within. Can be any dateutil-parsable string,\n        recommended to be YYYY-MM-DDTHH:MM:SS. Requires ``SearchIndexes`` to\n        implement the ``get_updated_field`` method. Default is ``None``.\n    ``--batch-size``:\n        Number of items to index at once. Default is 1000.\n    ``--remove``:\n        Remove objects from the index that are no longer present in the\n        database.\n    ``--workers``:\n        Allows for the use of multiple workers to parallelize indexing. Requires\n        ``multiprocessing``.\n    ``--verbosity``:\n        If provided, dumps out more information about what's being done.\n\n          * ``0`` = No output\n          * ``1`` = Minimal output describing what models were indexed\n            and how many records.\n          * ``2`` = Full output, including everything from ``1`` plus output\n            on each batch that is indexed, which is useful when debugging.\n    ``--using``:\n        Update only the named backend (can be used multiple times). By default,\n        all backends will be updated.\n    ``--nocommit``:\n        If provided, it will pass commit=False to the backend.  This means that the\n        updates will not become immediately visible and will depend on another explicit commit\n        or the backend's commit strategy to complete the update.\n\n.. 
note::\n\n    The ``--nocommit`` argument is only supported by the Solr and ElasticSearch backends.\n\nExamples::\n\n    # Update everything.\n    ./manage.py update_index --settings=settings.prod\n\n    # Update everything with lots of information about what's going on.\n    ./manage.py update_index --settings=settings.prod --verbosity=2\n\n    # Update everything, cleaning up after deleted models.\n    ./manage.py update_index --remove --settings=settings.prod\n\n    # Update everything changed in the last 2 hours.\n    ./manage.py update_index --age=2 --settings=settings.prod\n\n    # Update everything between Dec. 1, 2011 & Dec 31, 2011\n    ./manage.py update_index --start='2011-12-01T00:00:00' --end='2011-12-31T23:59:59' --settings=settings.prod\n\n    # Update just a couple apps.\n    ./manage.py update_index blog auth comments --settings=settings.prod\n\n    # Update just a single model (in a complex app).\n    ./manage.py update_index auth.User --settings=settings.prod\n\n    # Crazy Go-Nuts University\n    ./manage.py update_index events.Event media news.Story --start='2011-01-01T00:00:00' --remove --using=hotbackup --workers=12 --verbosity=2 --settings=settings.prod\n\n.. note::\n\n    This command *ONLY* updates records in the index. It does *NOT* handle\n    deletions unless the ``--remove`` flag is provided. You might consider\n    a queue consumer if the memory requirements for ``--remove`` don't\n    fit your needs. Alternatively, you can use the\n    ``RealtimeSignalProcessor``, which will automatically handle deletions.\n\n\n``rebuild_index``\n=================\n\nA shortcut for ``clear_index`` followed by ``update_index``. It accepts any/all\nof the following arguments:\n\n    ``--age``:\n        Number of hours back to consider objects new. Useful for nightly\n        reindexes (``--age=24``). 
Requires ``SearchIndexes`` to implement\n        the ``get_updated_field`` method.\n    ``--batch-size``:\n        Number of items to index at once. Default is 1000.\n    ``--site``:\n        The site object to use when reindexing (like `search_sites.mysite`).\n    ``--noinput``:\n        If provided, the interactive prompts are skipped and the index is\n        unceremoniously wiped out.\n    ``--remove``:\n        Remove objects from the index that are no longer present in the\n        database.\n    ``--verbosity``:\n        If provided, dumps out more information about what's being done.\n\n          * ``0`` = No output\n          * ``1`` = Minimal output describing what models were indexed\n            and how many records.\n          * ``2`` = Full output, including everything from ``1`` plus output\n            on each batch that is indexed, which is useful when debugging.\n    ``--using``:\n        Update only the named backend (can be used multiple times). By default,\n        all backends will be updated.\n    ``--nocommit``:\n        If provided, it will pass commit=False to the backend.  This means that the\n        update will not become immediately visible and will depend on another explicit commit\n        or the backend's commit strategy to complete the update.\n\nFor when you really, really want a completely rebuilt index.\n\n\n``build_solr_schema``\n=====================\n\nOnce all of your ``SearchIndex`` classes are in place, this command can be used\nto generate the XML schema Solr needs to handle the search data.  Generates a\nSolr schema and solrconfig file that reflects the indexes using templates under\na Django template dir 'search_configuration/\\*.xml'. If none are found, then\nprovides defaults suitable for Solr 6.4.\n\nIt accepts the following arguments:\n\n    ``--filename``:\n        If provided, renders schema.xml from the template directory directly to\n        a file instead of stdout. 
Does not render solrconfig.xml\n    ``--using``:\n        Update only the named backend (can be used multiple times). By default\n        all backends will be updated.\n    ``--configure-directory``:\n        If provided, attempts to configure a core located in the given directory\n        by removing the ``managed-schema.xml`` (renaming if it exists), configuring\n        the core by rendering the ``schema.xml`` and ``solrconfig.xml`` templates\n        provided in the Django project's ``TEMPLATE_DIR/search_configuration``\n        directories.\n    ``--reload-core``:\n        If provided, attempts to automatically reload the solr core via the urls\n        in the ``URL`` and ``ADMIN_URL`` settings of the Solr entry in\n        ``HAYSTACK_CONNECTIONS``. Both *must* be provided.\n\n.. note::\n   ``build_solr_schema --configure-directory=<dir>`` can be used in isolation to\n   drop configured files anywhere one might want for staging to one or more solr\n   instances through arbitrary means.  It will render all template files in the\n   directory into the ``configure-directory``\n\n   ``build_solr_schema --configure-directory=<dir> --reload-core`` can be used\n   together to reconfigure and reload a core located on a filesystem accessible\n   to Django in a one-shot mechanism with no further requirements (assuming\n   there are no errors in the template or configuration)\n\n.. note::\n    ``build_solr_schema`` uses templates to generate the output files. Haystack\n    provides default templates for ``schema.xml`` and ``solrconfig.xml`` that\n    are solr 6.5 compatible using some sensible defaults. If you would like to\n    provide your own template, you will need to place it in\n    ``search_configuration/`` inside a directory specified by your app's\n    template directories settings. 
Examples::\n\n        /myproj/myapp/templates/search_configuration/schema.xml\n        /myproj/myapp/templates/search_configuration/solrconfig.xml\n        /myproj/myapp/templates/search_configuration/otherfile.xml\n        # ...or...\n        /myproj/templates/search_configuration/schema.xml\n        /myproj/templates/search_configuration/solrconfig.xml\n        /myproj/myapp/templates/search_configuration/otherfile.xml\n\n.. warning::\n    This command does NOT automatically update the ``schema.xml`` file for you\n    all by itself.  You must use --filename or --configure-directory to achieve\n    this.\n\n\n``haystack_info``\n=================\n\nProvides some basic information about how Haystack is setup and what models it\nis handling. It accepts no arguments. Useful when debugging or when using\nHaystack-enabled third-party apps.\n"
  },
  {
    "path": "docs/migration_from_1_to_2.rst",
    "content": ".. _ref-migration_from_1_to_2:\n\n===========================================\nMigrating From Haystack 1.X to Haystack 2.X\n===========================================\n\nHaystack introduced several backward-incompatible changes in the process of\nmoving from the 1.X series to the 2.X series. These were done to clean up the\nAPI, to support new features & to clean up problems in 1.X. At a high level,\nthey consisted of:\n\n* The removal of ``SearchSite`` & ``haystack.site``.\n* The removal of ``handle_registrations`` & ``autodiscover``.\n* The addition of multiple index support.\n* The addition of ``SignalProcessors`` & the removal of ``RealTimeSearchIndex``.\n* The removal/renaming of various settings.\n\nThis guide will help you make the changes needed to be compatible with Haystack\n2.X.\n\n\nSettings\n========\n\nMost prominently, the old way of specifying a backend & its settings has changed\nto support the multiple index feature. A complete Haystack 1.X example might\nlook like::\n\n    HAYSTACK_SEARCH_ENGINE = 'solr'\n    HAYSTACK_SOLR_URL = 'http://localhost:9001/solr/default'\n    HAYSTACK_SOLR_TIMEOUT = 60 * 5\n    HAYSTACK_INCLUDE_SPELLING = True\n    HAYSTACK_BATCH_SIZE = 100\n\n    # Or...\n    HAYSTACK_SEARCH_ENGINE = 'whoosh'\n    HAYSTACK_WHOOSH_PATH = '/home/search/whoosh_index'\n    HAYSTACK_WHOOSH_STORAGE = 'file'\n    HAYSTACK_WHOOSH_POST_LIMIT = 128 * 1024 * 1024\n    HAYSTACK_INCLUDE_SPELLING = True\n    HAYSTACK_BATCH_SIZE = 100\n\n    # Or...\n    HAYSTACK_SEARCH_ENGINE = 'xapian'\n    HAYSTACK_XAPIAN_PATH = '/home/search/xapian_index'\n    HAYSTACK_INCLUDE_SPELLING = True\n    HAYSTACK_BATCH_SIZE = 100\n\nIn Haystack 2.X, you can now supply as many backends as you like, so all of the\nabove settings can now be active at the same time. 
A translated set of settings\nwould look like::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n            'URL': 'http://localhost:9001/solr/default',\n            'TIMEOUT': 60 * 5,\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n        },\n        'autocomplete': {\n            'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',\n            'PATH': '/home/search/whoosh_index',\n            'STORAGE': 'file',\n            'POST_LIMIT': 128 * 1024 * 1024,\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n        },\n        'slave': {\n            'ENGINE': 'xapian_backend.XapianEngine',\n            'PATH': '/home/search/xapian_index',\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n        },\n    }\n\nYou are required to have at least one connection listed within\n``HAYSTACK_CONNECTIONS``, it must be named ``default`` & it must have a valid\n``ENGINE`` within it. Bare minimum looks like::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n        }\n    }\n\nThe key for each backend is an identifier you use to describe the backend within\nyour app. 
You should refer to the :ref:`ref-multiple_index` documentation for\nmore information on using the new multiple indexes & routing features.\n\nAlso note that the ``ENGINE`` setting has changed from a lowercase \"short name\"\nof the engine to a full path to a new ``Engine`` class within the backend.\nAvailable options are:\n\n* ``haystack.backends.solr_backend.SolrEngine``\n* ``haystack.backends.whoosh_backend.WhooshEngine``\n* ``haystack.backends.simple_backend.SimpleEngine``\n\nAdditionally, the following settings were outright removed & will generate\nan exception if found:\n\n* ``HAYSTACK_SITECONF`` - Remove this setting & the file it pointed to.\n* ``HAYSTACK_ENABLE_REGISTRATIONS``\n* ``HAYSTACK_INCLUDE_SPELLING``\n\n\nBackends\n========\n\nThe ``dummy`` backend was outright removed from Haystack, as it served very\nlittle use after the ``simple`` (pure-ORM-powered) backend was introduced.\n\nIf you wrote a custom backend, please refer to the \"Custom Backends\" section\nbelow.\n\n\nIndexes\n=======\n\nThe other major changes affect the ``SearchIndex`` class. 
As the concept of\n``haystack.site`` & ``SearchSite`` are gone, you'll need to modify your indexes.\n\nA Haystack 1.X index might've looked like::\n\n    import datetime\n    from haystack.indexes import *\n    from haystack import site\n    from myapp.models import Note\n\n\n    class NoteIndex(SearchIndex):\n        text = CharField(document=True, use_template=True)\n        author = CharField(model_attr='user')\n        pub_date = DateTimeField(model_attr='pub_date')\n\n        def get_queryset(self):\n            \"\"\"Used when the entire index for model is updated.\"\"\"\n            return Note.objects.filter(pub_date__lte=datetime.datetime.now())\n\n\n    site.register(Note, NoteIndex)\n\nA converted Haystack 2.X index should look like::\n\n    import datetime\n    from haystack import indexes\n    from myapp.models import Note\n\n\n    class NoteIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        author = indexes.CharField(model_attr='user')\n        pub_date = indexes.DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\n        def index_queryset(self, using=None):\n            \"\"\"Used when the entire index for model is updated.\"\"\"\n            return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())\n\nNote the import on ``site`` & the registration statements are gone. Newly added\nis the ``NoteIndex.get_model`` method. This is a **required** method &\nshould simply return the ``Model`` class the index is for.\n\nThere's also a new, additional class added to the ``class`` definition. The\n``indexes.Indexable`` class is a simple mixin that serves to identify the\nclasses Haystack should automatically discover & use. 
If you have a custom\nbase class (say ``QueuedSearchIndex``) that other indexes inherit from, simply\nleave the ``indexes.Indexable`` off that declaration & Haystack won't try to\nuse it.\n\nAdditionally, the name of the ``document=True`` field is now enforced to be\n``text`` across all indexes. If you need it named something else, you should\nset the ``HAYSTACK_DOCUMENT_FIELD`` setting. For example::\n\n    HAYSTACK_DOCUMENT_FIELD = 'pink_polka_dot'\n\nFinally, the ``index_queryset`` method should supplant the ``get_queryset``\nmethod. This was present in the Haystack 1.2.X series (with a deprecation warning\nin 1.2.4+) but has been removed in Haystack v2.\n\nFinally, if you were unregistering other indexes before, you should make use of\nthe new ``EXCLUDED_INDEXES`` setting available in each backend's settings. It\nshould be a list of strings that contain the Python import path to the indexes\nthat should not be loaded & used. For example::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n            'URL': 'http://localhost:9001/solr/default',\n            'EXCLUDED_INDEXES': [\n                # Imagine that these indexes exist. They don't.\n                'django.contrib.auth.search_indexes.UserIndex',\n                'third_party_blog_app.search_indexes.EntryIndex',\n            ]\n        }\n    }\n\nThis allows for reliable swapping of the index that handles a model without\nrelying on correct import order.\n\n\nRemoval of ``RealTimeSearchIndex``\n==================================\n\nUse of the ``haystack.indexes.RealTimeSearchIndex`` is no longer valid. It has\nbeen removed in favor of ``RealtimeSignalProcessor``. 
To migrate, first change\nthe inheritance of all your ``RealTimeSearchIndex`` subclasses to use\n``SearchIndex`` instead::\n\n    # Old.\n    class MySearchIndex(indexes.RealTimeSearchIndex, indexes.Indexable):\n        # ...\n\n\n    # New.\n    class MySearchIndex(indexes.SearchIndex, indexes.Indexable):\n        # ...\n\nThen update your settings to enable use of the ``RealtimeSignalProcessor``::\n\n    HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'\n\n\nDone!\n=====\n\nFor most basic uses of Haystack, this is all that is necessary to work with\nHaystack 2.X. You should rebuild your index if needed & test your new setup.\n\n\nAdvanced Uses\n=============\n\nSwapping Backend\n----------------\n\nIf you were manually swapping the ``SearchQuery`` or ``SearchBackend`` being\nused by ``SearchQuerySet`` in the past, it's now preferable to simply setup\nanother connection & use the ``SearchQuerySet.using`` method to select that\nconnection instead.\n\nAlso, if you were manually instantiating ``SearchBackend`` or ``SearchQuery``,\nit's now preferable to rely on the connection's engine to return the right\nthing. For example::\n\n    from haystack import connections\n    backend = connections['default'].get_backend()\n    query = connections['default'].get_query()\n\n\nCustom Backends\n---------------\n\nIf you had written a custom ``SearchBackend`` and/or custom ``SearchQuery``,\nthere's a little more work needed to be Haystack 2.X compatible.\n\nYou should, but don't have to, rename your ``SearchBackend`` & ``SearchQuery``\nclasses to be more descriptive/less collide-y. For example,\n``solr_backend.SearchBackend`` became ``solr_backend.SolrSearchBackend``. This\nprevents non-namespaced imports from stomping on each other.\n\nYou need to add a new class to your backend, subclassing ``BaseEngine``. This\nallows specifying what ``backend`` & ``query`` should be used on a connection\nwith less duplication/naming trickery. 
It goes at the bottom of the file (so\nthat the classes are defined above it) and should look like::\n\n    from haystack.backends import BaseEngine\n    from haystack.backends.solr_backend import SolrSearchQuery\n\n    # Code then...\n\n    class MyCustomSolrEngine(BaseEngine):\n        # Use our custom backend.\n        backend = MySolrBackend\n        # Use the built-in Solr query.\n        query = SolrSearchQuery\n\nYour ``HAYSTACK_CONNECTIONS['default']['ENGINE']`` should then point to the\nfull Python import path to your new ``BaseEngine`` subclass.\n\nFinally, you will likely have to adjust the ``SearchBackend.__init__`` &\n``SearchQuery.__init__``, as they have changed significantly. Please refer to\nthe commits for those backends.\n"
  },
  {
    "path": "docs/multiple_index.rst",
    "content": ".. _ref-multiple_index:\n\n================\nMultiple Indexes\n================\n\nMuch like Django's `multiple database support`_, Haystack has \"multiple index\"\nsupport. This allows you to talk to several different engines at the same time.\nIt enables things like master-slave setups, multiple language indexing,\nseparate indexes for general search & autocomplete as well as other options.\n\n.. _`multiple database support`: http://docs.djangoproject.com/en/dev/topics/db/multi-db/\n\n\nSpecifying Available Connections\n================================\n\nYou can supply as many backends as you like, each with a descriptive name. A\ncomplete setup that accesses all backends might look like::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n            'URL': 'http://localhost:9001/solr/default',\n            'TIMEOUT': 60 * 5,\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n            'SILENTLY_FAIL': True,\n        },\n        'autocomplete': {\n            'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',\n            'PATH': '/home/search/whoosh_index',\n            'STORAGE': 'file',\n            'POST_LIMIT': 128 * 1024 * 1024,\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n            'SILENTLY_FAIL': True,\n        },\n        'slave': {\n            'ENGINE': 'xapian_backend.XapianEngine',\n            'PATH': '/home/search/xapian_index',\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n            'SILENTLY_FAIL': True,\n        },\n        'db': {\n            'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n            'SILENTLY_FAIL': True,\n        }\n    }\n\nYou are required to have at least one connection listed within\n``HAYSTACK_CONNECTIONS``, it must be named ``default`` & it must have a valid\n``ENGINE`` within it.\n\n\nManagement 
Commands\n===================\n\nAll management commands that manipulate data use **ONLY** one connection at a\ntime. By default, they use the ``default`` index but accept a ``--using`` flag\nto specify a different connection. For example::\n\n    ./manage.py rebuild_index --noinput --using=autocomplete\n\n\nAutomatic Routing\n=================\n\nTo make the selection of the correct index easier, Haystack (like Django) has\nthe concept of \"routers\". All provided routers are checked whenever a read or\nwrite happens, in the order in which they are defined.\n\nFor read operations (when a search query is executed), the ``for_read`` method\nof each router is called, until one of them returns an index, which is used for\nthe read operation.\n\nFor write operations (when a delete or update is executed), the ``for_write``\nmethod of each router is called, and the results are aggregated. All of the\nindexes that were returned are then updated.\n\nHaystack ships with a ``DefaultRouter`` enabled. It looks like::\n\n    class DefaultRouter(BaseRouter):\n        def for_read(self, **hints):\n            return DEFAULT_ALIAS\n        \n        def for_write(self, **hints):\n            return DEFAULT_ALIAS\n\nThis means that the default index is used for all read and write operations.\n\nIf the ``for_read`` or ``for_write`` method doesn't exist or returns ``None``,\nthat indicates that the current router can't handle the data. The next router\nis then checked.\n\nThe ``for_write`` method can return either a single string representing an\nindex name, or an iterable of such index names. For example::\n\n    class UpdateEverythingRouter(BaseRouter):\n        def for_write(self, **hints):\n            return ('myindex1', 'myindex2')\n\nThe ``hints`` passed can be anything that helps the router make a decision. This\ndata should always be considered optional & be guarded against. 
At current,\n``for_write`` receives an ``index`` option (pointing to the ``SearchIndex``\ncalling it) while ``for_read`` may receive ``models`` (being a list of ``Model``\nclasses the ``SearchQuerySet`` may be looking at).\n\nYou may provide as many routers as you like by overriding the\n``HAYSTACK_ROUTERS`` setting. For example::\n\n    HAYSTACK_ROUTERS = ['myapp.routers.MasterRouter', 'myapp.routers.SlaveRouter', 'haystack.routers.DefaultRouter']\n\nMaster-Slave Example\n--------------------\n\nThe ``MasterRouter`` & ``SlaveRouter`` might look like::\n\n    from haystack import routers\n    \n    \n    class MasterRouter(routers.BaseRouter):\n        def for_write(self, **hints):\n            return 'master'\n        \n        def for_read(self, **hints):\n            return None\n    \n    \n    class SlaveRouter(routers.BaseRouter):\n        def for_write(self, **hints):\n            return None\n        \n        def for_read(self, **hints):\n            return 'slave'\n\nThe observant might notice that since the methods don't overlap, this could be\ncombined into one ``Router`` like so::\n\n    from haystack import routers\n    \n    \n    class MasterSlaveRouter(routers.BaseRouter):\n        def for_write(self, **hints):\n            return 'master'\n        \n        def for_read(self, **hints):\n            return 'slave'\n\n\nManually Selecting\n==================\n\nThere may be times when automatic selection of the correct index is undesirable,\nsuch as when fixing erroneous data in an index or when you know exactly where\ndata should be located.\n\nFor this, the ``SearchQuerySet`` class allows for manually selecting the index\nvia the ``SearchQuerySet.using`` method::\n\n    from haystack.query import SearchQuerySet\n    \n    # Uses the routers' opinion.\n    sqs = SearchQuerySet().auto_query('banana')\n    \n    # Forces the default.\n    sqs = SearchQuerySet().using('default').auto_query('banana')\n    \n    # Forces the slave connection (presuming 
it was setup).\n    sqs = SearchQuerySet().using('slave').auto_query('banana')\n\n.. warning::\n\n  Note that the models a ``SearchQuerySet`` is trying to pull from must all come\n  from the same index. Haystack is not able to combine search queries against\n  different indexes.\n\n\nCustom Index Selection\n======================\n\nIf a specific backend has been selected, the ``SearchIndex.index_queryset`` and\n``SearchIndex.read_queryset`` will receive the backend name, giving indexes the\nopportunity to customize the returned queryset.\n\nFor example, a site which uses separate indexes for recent items and older\ncontent might define ``index_queryset`` to filter the items based on date::\n\n        def index_queryset(self, using=None):\n            qs = Note.objects.all()\n            archive_limit = datetime.datetime.now() - datetime.timedelta(days=90)\n\n            if using == \"archive\":\n                return qs.filter(pub_date__lte=archive_limit)\n            else:\n                return qs.filter(pub_date__gte=archive_limit)\n\n\nMulti-lingual Content\n---------------------\n\nMost search engines require you to set the language at the index level. For\nexample, a multi-lingual site using Solr can use `multiple cores <http://wiki.apache.org/solr/CoreAdmin>`_ and corresponding Haystack\nbackends using the language name. Under this scenario, queries are simple::\n\n    sqs = SearchQuerySet().using(lang).auto_query(…)\n\nDuring index updates, the Index's ``index_queryset`` method will need to filter\nthe items to avoid sending the wrong content to the search engine::\n\n        def index_queryset(self, using=None):\n            return Post.objects.filter(language=using)\n"
  },
  {
    "path": "docs/other_apps.rst",
    "content": ".. _ref-other_apps:\n\n=============================\nHaystack-Related Applications\n=============================\n\nSub Apps\n========\n\nThese are apps that build on top of the infrastructure provided by Haystack.\nUseful for essentially extending what Haystack can do.\n\nqueued_search\n-------------\n\nhttp://github.com/toastdriven/queued_search (2.X compatible)\n\nProvides a queue-based setup as an alternative to ``RealtimeSignalProcessor`` or\nconstantly running the ``update_index`` command. Useful for high-load, short\nupdate time situations.\n\ncelery-haystack\n---------------\n\nhttps://github.com/jezdez/celery-haystack (1.X and 2.X compatible)\n\nAlso provides a queue-based setup, this time centered around Celery. Useful\nfor keeping the index fresh per model instance or with the included task\nto call the ``update_index`` management command instead.\n\nhaystack-rqueue\n---------------\n\nhttps://github.com/mandx/haystack-rqueue (2.X compatible)\n\nAlso provides a queue-based setup, this time centered around RQ. Useful\nfor keeping the index fresh using ``./manage.py rqworker``.\n\ndjango-celery-haystack\n----------------------\n\nhttps://github.com/mixcloud/django-celery-haystack-SearchIndex\n\nAnother queue-based setup, also around Celery. Useful\nfor keeping the index fresh.\n\nsaved_searches\n--------------\n\nhttp://github.com/toastdriven/saved_searches (2.X compatible)\n\nAdds personalization to search. Retains a history of queries run by the various\nusers on the site (including anonymous users). 
This can be used to present the\nuser with their search history and provide most popular/most recent queries\non the site.\n\nsaved-search\n------------\n\nhttps://github.com/DirectEmployers/saved-search\n\nAn alternate take on persisting user searches, this has a stronger focus\non locale-based searches as well as further integration.\n\nhaystack-static-pages\n---------------------\n\nhttp://github.com/trapeze/haystack-static-pages\n\nProvides a simple way to index flat (non-model-based) content on your site.\nBy using the management command that comes with it, it can crawl all pertinent\npages on your site and add them to search.\n\ndjango-tumbleweed\n-----------------\n\nhttp://github.com/mcroydon/django-tumbleweed\n\nProvides a tumblelog-like view to any/all Haystack-enabled models on your\nsite. Useful for presenting date-based views of search data. Attempts to avoid\nthe database completely where possible.\n\n\nHaystack-Enabled Apps\n=====================\n\nThese are reusable apps that ship with ``SearchIndexes``, suitable for quick\nintegration with Haystack.\n\n* django-faq (freq. asked questions app) - http://github.com/benspaulding/django-faq\n* django-essays (blog-like essay app) - http://github.com/bkeating/django-essays\n* gtalug (variety of apps) - http://github.com/myles/gtalug\n* sciencemuseum (science museum open data) - http://github.com/simonw/sciencemuseum\n* vz-wiki (wiki) - http://github.com/jobscry/vz-wiki\n* ffmff (events app) - http://github.com/stefreak/ffmff\n* Dinette (forums app) - http://github.com/uswaretech/Dinette\n* fiftystates_site (site) - http://github.com/sunlightlabs/fiftystates_site\n* Open-Knesset (site) - http://github.com/ofri/Open-Knesset\n"
  },
  {
    "path": "docs/python3.rst",
    "content": ".. _ref-python3:\n\n================\nPython 3 Support\n================\n\nAs of Haystack v2.1.0, it has been ported to support both Python 2 & Python 3\nwithin the same codebase. This builds on top of what `six`_ & `Django`_ provide.\n\nNo changes are required for anyone running an existing Haystack\ninstallation. The API is completely backward-compatible, so you should be able\nto run your existing software without modification.\n\nVirtually all tests pass under both Python 2 & 3, with a small number of\nexpected failures under Python (typically related to ordering, see below).\n\n.. _`six`: http://pythonhosted.org/six/\n.. _`Django`: https://docs.djangoproject.com/en/1.5/topics/python3/#str-and-unicode-methods\n\n\nSupported Backends\n==================\n\nThe following backends are fully supported under Python 3. However, you may\nneed to update these dependencies if you have a pre-existing setup.\n\n* Solr (pysolr>=3.1.0)\n* Elasticsearch\n\n\nNotes\n=====\n\nTesting\n-------\n\nIf you were testing things such as the query generated by a given\n``SearchQuerySet`` or how your forms would render, under Python 3.3.2+,\n`hash randomization`_ is in effect, which means that the ordering of\ndictionaries is no longer consistent, even on the same platform.\n\nHaystack took the approach of abandoning making assertions about the entire\nstructure. Instead, we either simply assert that the new object contains the\nright things or make a call to ``sorted(...)`` around it to ensure order. It is\nrecommended you take a similar approach.\n\n.. _`hash randomization`: http://docs.python.org/3/whatsnew/3.3.html#builtin-functions-and-types\n"
  },
  {
    "path": "docs/rich_content_extraction.rst",
    "content": ".. _ref-rich_content_extraction:\n\n=======================\nRich Content Extraction\n=======================\n\nFor some projects it is desirable to index text content which is stored in\nstructured files such as PDFs, Microsoft Office documents, images, etc.\nCurrently only Solr's `ExtractingRequestHandler`_ is directly supported by\nHaystack but the approach below could be used with any backend which supports\nthis feature.\n\n.. _`ExtractingRequestHandler`: http://wiki.apache.org/solr/ExtractingRequestHandler\n\nExtracting Content\n==================\n\n:meth:`SearchBackend.extract_file_contents` accepts a file or file-like object\nand returns a dictionary containing two keys: ``metadata`` and ``contents``. The\n``contents`` value will be a string containing all of the text which the backend\nmanaged to extract from the file contents. ``metadata`` will always be a\ndictionary but the keys and values will vary based on the underlying extraction\nengine and the type of file provided.\n\nIndexing Extracted Content\n==========================\n\nGenerally you will want to include the extracted text in your main document\nfield along with everything else specified in your search template. This example\nshows how to override a hypothetical ``FileIndex``'s ``prepare`` method to\ninclude the extract content along with information retrieved from the database::\n\n    def prepare(self, obj):\n        data = super(FileIndex, self).prepare(obj)\n\n        # This could also be a regular Python open() call, a StringIO instance\n        # or the result of opening a URL. 
Note that due to a library limitation\n        # file_obj must have a .name attribute even if you need to set one\n        # manually before calling extract_file_contents:\n        file_obj = obj.the_file.open()\n\n        extracted_data = self.get_backend().extract_file_contents(file_obj)\n\n        # Now we'll finally perform the template processing to render the\n        # text field with *all* of our metadata visible for templating:\n        t = loader.select_template(('search/indexes/myapp/file_text.txt', ))\n        data['text'] = t.render(Context({'object': obj,\n                                         'extracted': extracted_data}))\n\n        return data\n\nThis allows you to insert the extracted text at the appropriate place in your\ntemplate, modified or intermixed with database content as appropriate:\n\n.. code-block:: html+django\n\n    {{ object.title }}\n    {{ object.owner.name }}\n\n    …\n\n    {% for k, v in extracted.metadata.items %}\n        {% for val in v %}\n            {{ k }}: {{ val|safe }}\n        {% endfor %}\n    {% endfor %}\n\n    {{ extracted.contents|striptags|safe }}"
  },
  {
    "path": "docs/running_tests.rst",
    "content": ".. _ref-running-tests:\n\n=============\nRunning Tests\n=============\n\nEverything\n==========\n\nThe simplest way to get up and running with Haystack's tests is to run::\n\n    python setup.py test\n\nThis installs all of the backend libraries & all dependencies for getting the\ntests going and runs the tests. You will still have to setup search servers\n(for running Solr tests, the spatial Solr tests & the Elasticsearch tests).\n\n\nCherry-Picked\n=============\n\nIf you'd rather not run all the tests, run only the backends you need since\ntests for backends that are not running will be skipped.\n\n``Haystack`` is maintained with all tests passing at all times, so if you\nreceive any errors during testing, please check your setup and file a report if\nthe errors persist.\n\nTo run just a portion of the tests you can use the script ``run_tests.py`` and\njust specify the files or directories you wish to run, for example::\n\n    cd test_haystack\n    ./run_tests.py whoosh_tests test_loading.py\n\nThe ``run_tests.py`` script is just a tiny wrapper around the nose_ library and\nany options you pass to it will be passed on; including ``--help`` to get a\nlist of possible options::\n\n    cd test_haystack\n    ./run_tests.py --help\n\n.. _nose: https://nose.readthedocs.io/en/latest/\n\nConfiguring Solr\n================\n\nHaystack assumes that you have a Solr server running on port ``9001`` which\nuses the schema and configuration provided in the\n``test_haystack/solr_tests/server/`` directory. For convenience, a script is\nprovided which will download, configure and start a test Solr server::\n\n    test_haystack/solr_tests/server/start-solr-test-server.sh\n\nIf no server is found all solr-related tests will be skipped.\n\nConfiguring Elasticsearch\n=========================\n\nThe test suite will try to connect to Elasticsearch on port ``9200``. If no\nserver is found all elasticsearch tests will be skipped. 
Note that the tests\nare destructive - during the teardown phase they will wipe the cluster clean so\nmake sure you don't run them against an instance with data you wish to keep.\n\nIf you want to run the geo-django tests you may need to review the\n`GeoDjango GEOS and GDAL settings`_ before running these commands::\n\n\tcd test_haystack\n\t./run_tests.py elasticsearch_tests\n\n.. _GeoDjango GEOS and GDAL settings: https://docs.djangoproject.com/en/1.7/ref/contrib/gis/install/geolibs/#geos-library-path\n"
  },
  {
    "path": "docs/searchbackend_api.rst",
    "content": ".. _ref-searchbackend-api:\n\n=====================\n``SearchBackend`` API\n=====================\n\n.. class:: SearchBackend(connection_alias, **connection_options)\n\nThe ``SearchBackend`` class handles interaction directly with the backend. The\nsearch query it performs is usually fed to it from a ``SearchQuery`` class that\nhas been built for that backend.\n\nThis class must be at least partially implemented on a per-backend basis and\nis usually accompanied by a ``SearchQuery`` class within the same module.\n\nUnless you are writing a new backend, it is unlikely you need to directly\naccess this class.\n\n\nMethod Reference\n================\n\n``update``\n----------\n\n.. method:: SearchBackend.update(self, index, iterable)\n\nUpdates the backend when given a ``SearchIndex`` and a collection of\ndocuments.\n\nThis method MUST be implemented by each backend, as it will be highly\nspecific to each one.\n\n``remove``\n----------\n\n.. method:: SearchBackend.remove(self, obj_or_string)\n\nRemoves a document/object from the backend. Can be either a model\ninstance or the identifier (i.e. ``app_name.model_name.id``) in the\nevent the object no longer exists.\n\nThis method MUST be implemented by each backend, as it will be highly\nspecific to each one.\n\n``clear``\n---------\n\n.. method:: SearchBackend.clear(self, models=[])\n\nClears the backend of all documents/objects for a collection of models.\n\nThis method MUST be implemented by each backend, as it will be highly\nspecific to each one.\n\n``search``\n----------\n\n.. 
method:: SearchBackend.search(self, query_string, sort_by=None, start_offset=0, end_offset=None, fields='', highlight=False, facets=None, date_facets=None, query_facets=None, narrow_queries=None, spelling_query=None, limit_to_registered_models=None, result_class=None, **kwargs)\n\nTakes a query to search on and returns a dictionary.\n\nThe query should be a string that is appropriate syntax for the backend.\n\nThe returned dictionary should contain the keys 'results' and 'hits'.\nThe 'results' value should be an iterable of populated ``SearchResult``\nobjects. The 'hits' should be an integer count of the number of matched\nresults the search backend found.\n\nThis method MUST be implemented by each backend, as it will be highly\nspecific to each one.\n\n``extract_file_contents``\n-------------------------\n\n.. method:: SearchBackend.extract_file_contents(self, file_obj)\n\nPerform text extraction on the provided file or file-like object. Returns either\nNone or a dictionary containing the keys ``contents`` and ``metadata``. The\n``contents`` field will always contain the extracted text content returned by\nthe underlying search engine but ``metadata`` may vary considerably based on\nthe backend and the input file.\n\n``prep_value``\n--------------\n\n.. method:: SearchBackend.prep_value(self, value)\n\nHook to give the backend a chance to prep an attribute value before\nsending it to the search engine.\n\nBy default, just force it to unicode.\n\n``more_like_this``\n------------------\n\n.. method:: SearchBackend.more_like_this(self, model_instance, additional_query_string=None, result_class=None)\n\nTakes a model object and returns results the backend thinks are similar.\n\nThis method MUST be implemented by each backend, as it will be highly\nspecific to each one.\n\n``build_schema``\n----------------\n\n.. 
method:: SearchBackend.build_schema(self, fields)\n\nTakes a dictionary of fields and returns schema information.\n\nThis method MUST be implemented by each backend, as it will be highly\nspecific to each one.\n\n``build_models_list``\n---------------------\n\n.. method:: SearchBackend.build_models_list(self)\n\nBuilds a list of models for searching.\n\nThe ``search`` method should use this and the ``django_ct`` field to\nnarrow the results (unless the user indicates not to). This helps ignore\nany results that are not currently handled models and ensures\nconsistent caching.\n"
  },
  {
    "path": "docs/searchfield_api.rst",
    "content": ".. _ref-searchfield-api:\n\n===================\n``SearchField`` API\n===================\n\n.. class:: SearchField\n\nThe ``SearchField`` and its subclasses provides a way to declare what data\nyou're interested in indexing. They are used with ``SearchIndexes``, much like\n``forms.*Field`` are used within forms or ``models.*Field`` within models.\n\nThey provide both the means for storing data in the index, as well as preparing\nthe data before it's placed in the index. Haystack uses all fields from all\n``SearchIndex`` classes to determine what the engine's index schema ought to\nlook like.\n\nIn practice, you'll likely never actually use the base ``SearchField``, as the\nsubclasses are much better at handling real data.\n\n\nSubclasses\n==========\n\nIncluded with Haystack are the following field types:\n\n* ``BooleanField``\n* ``CharField``\n* ``DateField``\n* ``DateTimeField``\n* ``DecimalField``\n* ``EdgeNgramField``\n* ``FloatField``\n* ``IntegerField``\n* ``LocationField``\n* ``MultiValueField``\n* ``NgramField``\n\nAnd equivalent faceted versions:\n\n* ``FacetBooleanField``\n* ``FacetCharField``\n* ``FacetDateField``\n* ``FacetDateTimeField``\n* ``FacetDecimalField``\n* ``FacetFloatField``\n* ``FacetIntegerField``\n* ``FacetMultiValueField``\n\n.. note::\n\n  There is no faceted variant of the n-gram fields. Because of how the engine\n  generates n-grams, faceting on these field types (``NgramField`` &\n  ``EdgeNgram``) would make very little sense.\n\n\nUsage\n=====\n\nWhile ``SearchField`` objects can be used on their own, they're generally used\nwithin a ``SearchIndex``. You use them in a declarative manner, just like\nfields in ``django.forms.Form`` or ``django.db.models.Model`` objects. 
For\nexample::\n\n    from haystack import indexes\n    from myapp.models import Note\n\n\n    class NoteIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        author = indexes.CharField(model_attr='user')\n        pub_date = indexes.DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\nThis will hook up those fields with the index and, when updating a ``Model``\nobject, pull the relevant data out and prepare it for storage in the index.\n\n\nField Options\n=============\n\n``default``\n-----------\n\n.. attribute:: SearchField.default\n\nProvides a means for specifying a fallback value in the event that no data is\nfound for the field. Can be either a value or a callable.\n\n``document``\n------------\n\n.. attribute:: SearchField.document\n\nA boolean flag that indicates which of the fields in the ``SearchIndex`` ought\nto be the primary field for searching within. Default is ``False``.\n\n.. note::\n\n    Only one field can be marked as the ``document=True`` field, so you should\n    standardize this name and the format of the field between all of your\n    ``SearchIndex`` classes.\n\n``indexed``\n-----------\n\n.. attribute:: SearchField.indexed\n\nA boolean flag for indicating whether or not the data from this field will\nbe searchable within the index. Default is ``True``.\n\nThe companion of this option is ``stored``.\n\n``index_fieldname``\n-------------------\n\n.. attribute:: SearchField.index_fieldname\n\nThe ``index_fieldname`` option allows you to force the name of the field in the\nindex. This does not change how Haystack refers to the field. This is useful\nwhen using Solr's dynamic attributes or when integrating with other external\nsoftware.\n\nDefault is variable name of the field within the ``SearchIndex``.\n\n``model_attr``\n--------------\n\n.. 
attribute:: SearchField.model_attr\n\nThe ``model_attr`` option is a shortcut for preparing data. Rather than having\nto manually fetch data out of a ``Model``, ``model_attr`` allows you to specify\na string that will automatically pull data out for you. For example::\n\n    # Automatically looks within the model and populates the field with\n    # the ``last_name`` attribute.\n    author = CharField(model_attr='last_name')\n\nIt also handles callables::\n\n    # On a ``User`` object, pulls the full name as pieced together by the\n    # ``get_full_name`` method.\n    author = CharField(model_attr='get_full_name')\n\nAnd can look through relations::\n\n    # Pulls the ``bio`` field from a ``UserProfile`` object that has a\n    # ``OneToOneField`` relationship to a ``User`` object.\n    biography = CharField(model_attr='user__profile__bio')\n\n``null``\n--------\n\n.. attribute:: SearchField.null\n\nA boolean flag for indicating whether or not it's permissible for the field\nnot to contain any data. Default is ``False``.\n\n.. note::\n\n    Unlike Django's database layer, which injects a ``NULL`` into the database\n    when a field is marked nullable, ``null=True`` will actually exclude that\n    field from being included with the document. This is more efficient for the\n    search engine to deal with.\n\n``stored``\n----------\n\n.. attribute:: SearchField.stored\n\nA boolean flag for indicating whether or not the data from this field will\nbe stored within the index. Default is ``True``.\n\nThis is useful for pulling data out of the index along with the search result\nin order to save on hits to the database.\n\nThe companion of this option is ``indexed``.\n\n``template_name``\n-----------------\n\n.. attribute:: SearchField.template_name\n\nAllows you to override the name of the template to use when preparing data. 
By\ndefault, the data templates for fields are located within your ``TEMPLATE_DIRS``\nunder a path like ``search/indexes/{app_label}/{model_name}_{field_name}.txt``.\nThis option lets you override that path (though still within ``TEMPLATE_DIRS``).\n\nExample::\n\n    bio = CharField(use_template=True, template_name='myapp/data/bio.txt')\n\nYou can also provide a list of templates, as ``loader.select_template`` is used\nunder the hood.\n\nExample::\n\n    bio = CharField(use_template=True, template_name=['myapp/data/bio.txt', 'myapp/bio.txt', 'bio.txt'])\n\n\n``use_template``\n----------------\n\n.. attribute:: SearchField.use_template\n\nA boolean flag for indicating whether or not a field should prepare its data\nvia a data template or not. Default is False.\n\nData templates are extremely useful, as they let you easily tie together\ndifferent parts of the ``Model`` (and potentially related models). This leads\nto better search results with very little effort.\n\n\n\nMethod Reference\n================\n\n``__init__``\n------------\n\n.. method:: SearchField.__init__(self, model_attr=None, use_template=False, template_name=None, document=False, indexed=True, stored=True, faceted=False, default=NOT_PROVIDED, null=False, index_fieldname=None, facet_class=None, boost=1.0, weight=None)\n\nInstantiates a fresh ``SearchField`` instance.\n\n``has_default``\n---------------\n\n.. method:: SearchField.has_default(self)\n\nReturns a boolean of whether this field has a default value.\n\n``prepare``\n-----------\n\n.. method:: SearchField.prepare(self, obj)\n\nTakes data from the provided object and prepares it for storage in the\nindex.\n\n``prepare_template``\n--------------------\n\n.. method:: SearchField.prepare_template(self, obj)\n\nFlattens an object for indexing.\n\nThis loads a template\n(``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and\nreturns the result of rendering that template. 
``object`` will be in\nits context.\n\n``convert``\n-----------\n\n.. method:: SearchField.convert(self, value)\n\nHandles conversion between the data found and the type of the field.\n\nExtending classes should override this method and provide correct\ndata coercion.\n"
  },
  {
    "path": "docs/searchindex_api.rst",
    "content": ".. _ref-searchindex-api:\n\n===================\n``SearchIndex`` API\n===================\n\n.. class:: SearchIndex()\n\nThe ``SearchIndex`` class allows the application developer a way to provide data to\nthe backend in a structured format. Developers familiar with Django's ``Form``\nor ``Model`` classes should find the syntax for indexes familiar.\n\nThis class is arguably the most important part of integrating Haystack into your\napplication, as it has a large impact on the quality of the search results and\nhow easy it is for users to find what they're looking for. Care and effort\nshould be put into making your indexes the best they can be.\n\n\nQuick Start\n===========\n\nFor the impatient::\n\n    import datetime\n    from haystack import indexes\n    from myapp.models import Note\n\n\n    class NoteIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        author = indexes.CharField(model_attr='user')\n        pub_date = indexes.DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\n        def index_queryset(self, using=None):\n            \"Used when the entire index for model is updated.\"\n            return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())\n\n\nBackground\n==========\n\nUnlike relational databases, most search engines supported by Haystack are\nprimarily document-based. They focus on a single text blob which they tokenize,\nanalyze and index. 
When searching, this field is usually the primary one that\nis searched.\n\nFurther, the schema used by most engines is the same for all types of data\nadded, unlike a relational database that has a table schema for each chunk of\ndata.\n\nIt may be helpful to think of your search index as something closer to a\nkey-value store instead of imagining it in terms of a RDBMS.\n\n\nWhy Create Fields?\n------------------\n\nDespite being primarily document-driven, most search engines also support the\nability to associate other relevant data with the indexed document. These\nattributes can be mapped through the use of fields within Haystack.\n\nCommon uses include storing pertinent data information, categorizations of the\ndocument, author information and related data. By adding fields for these pieces\nof data, you provide a means to further narrow/filter search terms. This can\nbe useful from either a UI perspective (a better advanced search form) or from a\ndeveloper standpoint (section-dependent search, off-loading certain tasks to\nsearch, et cetera).\n\n.. warning::\n\n    Haystack reserves the following field names for internal use: ``id``,\n    ``django_ct``, ``django_id`` & ``content``. The ``name`` & ``type`` names\n    used to be reserved but no longer are.\n\n    You can override these field names using the ``HAYSTACK_ID_FIELD``,\n    ``HAYSTACK_DJANGO_CT_FIELD`` & ``HAYSTACK_DJANGO_ID_FIELD`` if needed.\n\n\nSignificance Of ``document=True``\n---------------------------------\n\nMost search engines that were candidates for inclusion in Haystack all had a\ncentral concept of a document that they indexed. These documents form a corpus\nwithin which to primarily search. 
Because this ideal is so central and most of\nHaystack is designed to have pluggable backends, it is important to ensure that\nall engines have at least a bare minimum of the data they need to function.\n\nAs a result, when creating a ``SearchIndex``, one (and only one) field must be\nmarked with ``document=True``. This signifies to Haystack that whatever is\nplaced in this field while indexing is to be the primary text the search engine\nindexes. The name of this field can be almost anything, but ``text`` is one of\nthe more common names used.\n\n\nStored/Indexed Fields\n---------------------\n\nOne shortcoming of the use of search is that you rarely have all or the most\nup-to-date information about an object in the index. As a result, when\nretrieving search results, you will likely have to access the object in the\ndatabase to provide better information.\n\nHowever, this can also hit the database quite heavily (think\n``.get(pk=result.id)`` per object). If your search is popular, this can lead\nto a big performance hit. There are two ways to prevent this. The first way is\n``SearchQuerySet.load_all``, which tries to group all similar objects and pull\nthem through one query instead of many. This still hits the DB and incurs a\nperformance penalty.\n\nThe other option is to leverage stored fields. By default, all fields in\nHaystack are either indexed (searchable by the engine) or stored (retained by\nthe engine and presented in the results). By using a stored field, you can\nstore commonly used data in such a way that you don't need to hit the database\nwhen processing the search result to get more information.\n\nFor example, one great way to leverage this is to pre-render an object's\nsearch result template DURING indexing. You define an additional field, render\na template with it and it follows the main indexed record into the index. 
Then,\nwhen that record is pulled when it matches a query, you can simply display the\ncontents of that field, which avoids the database hit.\n\nWithin ``myapp/search_indexes.py``::\n\n    class NoteIndex(SearchIndex, indexes.Indexable):\n        text = CharField(document=True, use_template=True)\n        author = CharField(model_attr='user')\n        pub_date = DateTimeField(model_attr='pub_date')\n        # Define the additional field.\n        rendered = CharField(use_template=True, indexed=False)\n\nThen, inside a template named ``search/indexes/myapp/note_rendered.txt``::\n\n    <h2>{{ object.title }}</h2>\n\n    <p>{{ object.content }}</p>\n\nAnd finally, in ``search/search.html``::\n\n    ...\n\n    {% for result in page.object_list %}\n        <div class=\"search_result\">\n            {{ result.rendered|safe }}\n        </div>\n    {% endfor %}\n\n\nKeeping The Index Fresh\n=======================\n\nThere are several approaches to keeping the search index in sync with your\ndatabase. None are more correct than the others and which one you use depends\non the traffic you see, the churn rate of your data, and what concerns are\nimportant to you (CPU load, how recent, et cetera).\n\nThe conventional method is to use ``SearchIndex`` in combination with cron\njobs. Running a ``./manage.py update_index`` every couple hours will keep your\ndata in sync within that timeframe and will handle the updates in a very\nefficient batch. Additionally, Whoosh (and to a lesser extent Xapian) behaves\nbetter when using this approach.\n\nAnother option is to use ``RealtimeSignalProcessor``, which uses Django's\nsignals to immediately update the index any time a model is saved/deleted. This\nyields a much more current search index at the expense of being fairly\ninefficient. 
Solr & Elasticsearch are the only backends that handle this well\nunder load, and even then, you should make sure you have the server capacity\nto spare.\n\nA third option is to develop a custom ``QueuedSignalProcessor`` that, much like\n``RealtimeSignalProcessor``, uses Django's signals to enqueue messages for\nupdates/deletes. Then writing a management command to consume these messages\nin batches, yielding a nice compromise between the previous two options.\n\nFor more information see :doc:`signal_processors`.\n\n.. note::\n\n    Haystack doesn't ship with a ``QueuedSignalProcessor`` largely because there is\n    such a diversity of lightweight queuing options and that they tend to\n    polarize developers. Queuing is outside of Haystack's goals (provide good,\n    powerful search) and, as such, is left to the developer.\n\n    Additionally, the implementation is relatively trivial & there are already\n    good third-party add-ons for Haystack to enable this.\n\n\nAdvanced Data Preparation\n=========================\n\nIn most cases, using the `model_attr` parameter on your fields allows you to\neasily get data from a Django model to the document in your index, as it handles\nboth direct attribute access as well as callable functions within your model.\n\n.. note::\n\n    The ``model_attr`` keyword argument also can look through relations in\n    models. So you can do something like ``model_attr='author__first_name'``\n    to pull just the first name of the author, similar to some lookups used\n    by Django's ORM.\n\nHowever, sometimes, even more control over what gets placed in your index is\nneeded. To facilitate this, ``SearchIndex`` objects have a 'preparation' stage\nthat populates data just before it is indexed. 
You can hook into this phase in\nseveral ways.\n\nThis should be very familiar to developers who have used Django's ``forms``\nbefore as it loosely follows similar concepts, though the emphasis here is\nless on cleansing data from user input and more on making the data friendly\nto the search backend.\n\n1. ``prepare_FOO(self, object)``\n--------------------------------\n\nThe most common way to affect a single field's data is to create a\n``prepare_FOO`` method (where FOO is the name of the field). As a parameter\nto this method, you will receive the instance that is attempting to be indexed.\n\n.. note::\n\n   This method is analogous to Django's ``Form.clean_FOO`` methods.\n\nTo keep with our existing example, one use case might be altering the name\ninside the ``author`` field to be \"firstname lastname <email>\". In this case,\nyou might write the following code::\n\n    class NoteIndex(SearchIndex, indexes.Indexable):\n        text = CharField(document=True, use_template=True)\n        author = CharField(model_attr='user')\n        pub_date = DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\n        def prepare_author(self, obj):\n            return \"%s <%s>\" % (obj.user.get_full_name(), obj.user.email)\n\nThis method should return a single value (or list/tuple/dict) to populate that\nfield's data upon indexing. Note that this method takes priority over whatever\ndata may come from the field itself.\n\nJust like ``Form.clean_FOO``, the field's ``prepare`` runs before the\n``prepare_FOO``, allowing you to access ``self.prepared_data``. 
For example::\n\n    class NoteIndex(SearchIndex, indexes.Indexable):\n        text = CharField(document=True, use_template=True)\n        author = CharField(model_attr='user')\n        pub_date = DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\n        def prepare_author(self, obj):\n            # Say we want last name first, the hard way.\n            author = u''\n\n            if 'author' in self.prepared_data:\n                name_bits = self.prepared_data['author'].split()\n                author = \"%s, %s\" % (name_bits[-1], ' '.join(name_bits[:-1]))\n\n            return author\n\nThis method is fully functional with ``model_attr``, so if there's no convenient\nway to access the data you want, this is an excellent way to prepare it::\n\n    class NoteIndex(SearchIndex, indexes.Indexable):\n        text = CharField(document=True, use_template=True)\n        author = CharField(model_attr='user')\n        categories = MultiValueField()\n        pub_date = DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\n        def prepare_categories(self, obj):\n            # Since we're using a M2M relationship with a complex lookup,\n            # we can prepare the list here.\n            return [category.id for category in obj.category_set.active().order_by('-created')]\n\n\n2. ``prepare(self, object)``\n----------------------------\n\nEach ``SearchIndex`` gets a ``prepare`` method, which handles collecting all\nthe data. This method should return a dictionary that will be the final data\nused by the search backend.\n\nOverriding this method is useful if you need to collect more than one piece\nof data or need to incorporate additional data that is not well represented\nby a single ``SearchField``. 
An example might look like::\n\n    class NoteIndex(SearchIndex, indexes.Indexable):\n        text = CharField(document=True, use_template=True)\n        author = CharField(model_attr='user')\n        pub_date = DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\n        def prepare(self, object):\n            self.prepared_data = super(NoteIndex, self).prepare(object)\n\n            # Add in tags (assuming there's a M2M relationship to Tag on the model).\n            # Note that this would NOT get picked up by the automatic\n            # schema tools provided by Haystack.\n            self.prepared_data['tags'] = [tag.name for tag in object.tags.all()]\n\n            return self.prepared_data\n\nIf you choose to use this method, you should make a point to be careful to call\nthe ``super()`` method before altering the data. Without doing so, you may have\nan incomplete set of data populating your indexes.\n\nThis method has the final say in all data, overriding both what the fields\nprovide as well as any ``prepare_FOO`` methods on the class.\n\n.. note::\n\n   This method is roughly analogous to Django's ``Form.full_clean`` and\n   ``Form.clean`` methods. However, unlike these methods, it is not fired\n   as the result of trying to access ``self.prepared_data``. It requires\n   an explicit call.\n\n\n3. Overriding ``prepare(self, object)`` On Individual ``SearchField`` Objects\n-----------------------------------------------------------------------------\n\nThe final way to manipulate your data is to implement a custom ``SearchField``\nobject and write its ``prepare`` method to populate/alter the data any way you\nchoose. 
For instance, a (naive) user-created ``GeoPointField`` might look\nsomething like::\n\n    from haystack import indexes\n\n    class GeoPointField(indexes.CharField):\n        def __init__(self, **kwargs):\n            kwargs['default'] = '0.00-0.00'\n            super(GeoPointField, self).__init__(**kwargs)\n\n        def prepare(self, obj):\n            return \"%s-%s\" % (obj.latitude, obj.longitude)\n\nThe ``prepare`` method simply returns the value to be used for that field. It's\nentirely possible to include data that's not directly referenced to the object\nhere, depending on your needs.\n\nNote that this is NOT a recommended approach to storing geographic data in a\nsearch engine (there is no formal suggestion on this as support is usually\nnon-existent), merely an example of how to extend existing fields.\n\n.. note::\n\n   This method is analogous to Django's ``Field.clean`` methods.\n\n\nAdding New Fields\n=================\n\nIf you have an existing ``SearchIndex`` and you add a new field to it, Haystack\nwill add this new data on any updates it sees after that point. However, this\nwill not populate the existing data you already have.\n\nIn order for the data to be picked up, you will need to run ``./manage.py\nrebuild_index``. This will cause all backends to rebuild the existing data\nalready present in the quickest and most efficient way.\n\n.. note::\n\n    With the Solr backend, you'll also have to add to the appropriate\n    ``schema.xml`` for your configuration before running the ``rebuild_index``.\n\n\n``SearchIndex``\n================\n\n``get_model``\n-------------\n\n.. method:: SearchIndex.get_model(self)\n\nShould return the ``Model`` class (not an instance) that the rest of the\n``SearchIndex`` should use.\n\nThis method is required & you must override it to return the correct class.\n\n``index_queryset``\n------------------\n\n.. 
method:: SearchIndex.index_queryset(self, using=None)\n\nGet the default QuerySet to index when doing a full update.\n\nSubclasses can override this method to avoid indexing certain objects.\n\n``read_queryset``\n-----------------\n\n.. method:: SearchIndex.read_queryset(self, using=None)\n\nGet the default QuerySet for read actions.\n\nSubclasses can override this method to work with other managers.\nUseful when working with default managers that filter some objects.\n\n``build_queryset``\n-------------------\n\n.. method:: SearchIndex.build_queryset(self, start_date=None, end_date=None)\n\nGet the default QuerySet to index when doing an index update.\n\nSubclasses can override this method to take into account related\nmodel modification times.\n\nThe default is to use ``SearchIndex.index_queryset`` and filter\nbased on ``SearchIndex.get_updated_field``\n\n``prepare``\n-----------\n\n.. method:: SearchIndex.prepare(self, obj)\n\nFetches and adds/alters data before indexing.\n\n``get_content_field``\n---------------------\n\n.. method:: SearchIndex.get_content_field(self)\n\nReturns the field that supplies the primary document to be indexed.\n\n``update``\n----------\n\n.. method:: SearchIndex.update(self, using=None)\n\nUpdates the entire index.\n\nIf ``using`` is provided, it specifies which connection should be\nused. Default relies on the routers to decide which backend should\nbe used.\n\n``update_object``\n-----------------\n\n.. method:: SearchIndex.update_object(self, instance, using=None, **kwargs)\n\nUpdate the index for a single object. Attached to the class's\npost-save hook.\n\nIf ``using`` is provided, it specifies which connection should be\nused. Default relies on the routers to decide which backend should\nbe used.\n\n``remove_object``\n-----------------\n\n.. method:: SearchIndex.remove_object(self, instance, using=None, **kwargs)\n\nRemove an object from the index. 
Attached to the class's\npost-delete hook.\n\nIf ``using`` is provided, it specifies which connection should be\nused. Default relies on the routers to decide which backend should\nbe used.\n\n``clear``\n---------\n\n.. method:: SearchIndex.clear(self, using=None)\n\nClears the entire index.\n\nIf ``using`` is provided, it specifies which connection should be\nused. Default relies on the routers to decide which backend should\nbe used.\n\n``reindex``\n-----------\n\n.. method:: SearchIndex.reindex(self, using=None)\n\nCompletely clears the index for this model and rebuilds it.\n\nIf ``using`` is provided, it specifies which connection should be\nused. Default relies on the routers to decide which backend should\nbe used.\n\n``get_updated_field``\n---------------------\n\n.. method:: SearchIndex.get_updated_field(self)\n\nGet the field name that represents the updated date for the model.\n\nIf specified, this is used by the reindex command to filter out results\nfrom the ``QuerySet``, enabling you to reindex only recent records. This\nmethod should either return None (reindex everything always) or a\nstring of the ``Model``'s ``DateField``/``DateTimeField`` name.\n\n``should_update``\n-----------------\n\n.. method:: SearchIndex.should_update(self, instance, **kwargs)\n\nDetermine if an object should be updated in the index.\n\nIt's useful to override this when an object may save frequently and\ncause excessive reindexing. You should check conditions on the instance\nand return False if it is not to be indexed.\n\nThe ``kwargs`` passed along to this method can be the same as the ones passed\nby Django when a Model is saved/delete, so it's possible to check if the object\nhas been created or not. See ``django.db.models.signals.post_save`` for details\non what is passed.\n\nBy default, returns True (always reindex).\n\n``load_all_queryset``\n---------------------\n\n.. 
method:: SearchIndex.load_all_queryset(self)\n\nProvides the ability to override how objects get loaded in conjunction\nwith ``RelatedSearchQuerySet.load_all``. This is useful for post-processing the\nresults from the query, enabling things like adding ``select_related`` or\nfiltering certain data.\n\n.. warning::\n\n    Utilizing this functionality can have negative performance implications.\n    Please see the section on ``RelatedSearchQuerySet`` within\n    :doc:`searchqueryset_api` for further information.\n\nBy default, returns ``all()`` on the model's default manager.\n\nExample::\n\n    class NoteIndex(SearchIndex, indexes.Indexable):\n        text = CharField(document=True, use_template=True)\n        author = CharField(model_attr='user')\n        pub_date = DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\n        def load_all_queryset(self):\n            # Pull all objects related to the Note in search results.\n            return Note.objects.all().select_related()\n\nWhen searching, the ``RelatedSearchQuerySet`` appends on a call to ``in_bulk``, so be\nsure that the ``QuerySet`` you provide can accommodate this and that the ids\npassed to ``in_bulk`` will map to the model in question.\n\nIf you need a specific ``QuerySet`` in one place, you can specify this at the\n``RelatedSearchQuerySet`` level using the ``load_all_queryset`` method. See\n:doc:`searchqueryset_api` for usage.\n\n\n``ModelSearchIndex``\n====================\n\nThe ``ModelSearchIndex`` class allows for automatic generation of a\n``SearchIndex`` based on the fields of the model assigned to it.\n\nWith the exception of the automated introspection, it is a ``SearchIndex``\nclass, so all notes above pertaining to ``SearchIndexes`` apply. As with the\n``ModelForm`` class in Django, it employs an inner class called ``Meta``, which\nshould contain a ``model`` attribute. 
By default all non-relational model\nfields are included as search fields on the index, but fields can be restricted\nby way of a ``fields`` whitelist, or excluded with an ``excludes`` list, to\nprevent certain fields from appearing in the class.\n\nIn addition, it adds a `text` field that is the ``document=True`` field and\nhas `use_template=True` option set, just like the ``BasicSearchIndex``.\n\n.. warning::\n\n    Usage of this class might result in inferior ``SearchIndex`` objects, which\n    can directly affect your search results. Use this to establish basic\n    functionality and move to custom `SearchIndex` objects for better control.\n\nAt this time, it does not handle related fields.\n\nQuick Start\n-----------\n\nFor the impatient::\n\n    import datetime\n    from haystack import indexes\n    from myapp.models import Note\n\n    # All Fields\n    class AllNoteIndex(indexes.ModelSearchIndex, indexes.Indexable):\n        class Meta:\n            model = Note\n\n    # Blacklisted Fields\n    class LimitedNoteIndex(indexes.ModelSearchIndex, indexes.Indexable):\n        class Meta:\n            model = Note\n            excludes = ['user']\n\n    # Whitelisted Fields\n    class NoteIndex(indexes.ModelSearchIndex, indexes.Indexable):\n        class Meta:\n            model = Note\n            fields = ['user', 'pub_date']\n\n        # Note that regular ``SearchIndex`` methods apply.\n        def index_queryset(self, using=None):\n            \"Used when the entire index for model is updated.\"\n            return Note.objects.filter(pub_date__lte=datetime.datetime.now())\n"
  },
  {
    "path": "docs/searchquery_api.rst",
    "content": ".. _ref-searchquery-api:\n\n===================\n``SearchQuery`` API\n===================\n\n.. class:: SearchQuery(using=DEFAULT_ALIAS)\n\nThe ``SearchQuery`` class acts as an intermediary between ``SearchQuerySet``'s\nabstraction and ``SearchBackend``'s actual search. Given the metadata provided\nby ``SearchQuerySet``, ``SearchQuery`` builds the actual query and interacts\nwith the ``SearchBackend`` on ``SearchQuerySet``'s behalf.\n\nThis class must be at least partially implemented on a per-backend basis, as portions\nare highly specific to the backend. It usually is bundled with the accompanying\n``SearchBackend``.\n\nMost people will **NOT** have to use this class directly. ``SearchQuerySet``\nhandles all interactions with ``SearchQuery`` objects and provides a nicer\ninterface to work with.\n\nShould you need advanced/custom behavior, you can supply your version of\n``SearchQuery`` that overrides/extends the class in the manner you see fit.\nYou can either hook it up in a ``BaseEngine`` subclass or ``SearchQuerySet``\nobjects take a kwarg parameter ``query`` where you can pass in your class.\n\n\n``SQ`` Objects\n==============\n\nFor expressing more complex queries, especially involving AND/OR/NOT in\ndifferent combinations, you should use ``SQ`` objects. Like\n``django.db.models.Q`` objects, ``SQ`` objects can be passed to\n``SearchQuerySet.filter`` and use the familiar unary operators (``&``, ``|`` and\n``~``) to generate complex parts of the query.\n\n.. warning::\n\n    Any data you pass to ``SQ`` objects is passed along **unescaped**. 
If\n    you don't trust the data you're passing along, you should use\n    the ``clean`` method on your ``SearchQuery`` to sanitize the data.\n\nExample::\n\n    from haystack.query import SQ\n\n    # We want \"title: Foo AND (tags:bar OR tags:moof)\"\n    sqs = SearchQuerySet().filter(title='Foo').filter(SQ(tags='bar') | SQ(tags='moof'))\n\n    # To clean user-provided data:\n    sqs = SearchQuerySet()\n    clean_query = sqs.query.clean(user_query)\n    sqs = sqs.filter(SQ(title=clean_query) | SQ(tags=clean_query))\n\nInternally, the ``SearchQuery`` object maintains a tree of ``SQ`` objects. Each\n``SQ`` object supports what field it looks up against, what kind of lookup (i.e.\nthe ``__`` filters), what value it's looking for, if it's a AND/OR/NOT and\ntracks any children it may have. The ``SearchQuery.build_query`` method starts\nwith the root of the tree, building part of the final query at each node until\nthe full final query is ready for the ``SearchBackend``.\n\n\nBackend-Specific Methods\n========================\n\nWhen implementing a new backend, the following methods will need to be created:\n\n``build_query_fragment``\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.build_query_fragment(self, field, filter_type, value)\n\nGenerates a query fragment from a field, filter type and a value.\n\nMust be implemented in backends as this will be highly backend specific.\n\n\nInheritable Methods\n===================\n\nThe following methods have a complete implementation in the base class and\ncan largely be used unchanged.\n\n``build_query``\n~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.build_query(self)\n\nInterprets the collected query metadata and builds the final query to\nbe sent to the backend.\n\n``build_params``\n~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.build_params(self, spelling_query=None)\n\nGenerates a list of params to use when searching.\n\n``clean``\n~~~~~~~~~\n\n.. 
method:: SearchQuery.clean(self, query_fragment)\n\nProvides a mechanism for sanitizing user input before presenting the\nvalue to the backend.\n\nA basic (override-able) implementation is provided.\n\n``run``\n~~~~~~~\n\n.. method:: SearchQuery.run(self, spelling_query=None, **kwargs)\n\nBuilds and executes the query. Returns a list of search results.\n\nOptionally passes along an alternate query for spelling suggestions.\n\nOptionally passes along more kwargs for controlling the search query.\n\n``run_mlt``\n~~~~~~~~~~~\n\n.. method:: SearchQuery.run_mlt(self, **kwargs)\n\nExecutes the More Like This. Returns a list of search results similar\nto the provided document (and optionally query).\n\n``run_raw``\n~~~~~~~~~~~\n\n.. method:: SearchQuery.run_raw(self, **kwargs)\n\nExecutes a raw query. Returns a list of search results.\n\n``get_count``\n~~~~~~~~~~~~~\n\n.. method:: SearchQuery.get_count(self)\n\nReturns the number of results the backend found for the query.\n\nIf the query has not been run, this will execute the query and store\nthe results.\n\n``get_results``\n~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.get_results(self, **kwargs)\n\nReturns the results received from the backend.\n\nIf the query has not been run, this will execute the query and store\nthe results.\n\n``get_facet_counts``\n~~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.get_facet_counts(self)\n\nReturns the results received from the backend.\n\nIf the query has not been run, this will execute the query and store\nthe results.\n\n``boost_fragment``\n~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.boost_fragment(self, boost_word, boost_value)\n\nGenerates query fragment for boosting a single word/value pair.\n\n``matching_all_fragment``\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.matching_all_fragment(self)\n\nGenerates the query that matches all documents.\n\n``add_filter``\n~~~~~~~~~~~~~~\n\n.. 
method:: SearchQuery.add_filter(self, expression, value, use_not=False, use_or=False)\n\nNarrows the search by requiring certain conditions.\n\n``add_order_by``\n~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_order_by(self, field)\n\nOrders the search result by a field.\n\n``clear_order_by``\n~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.clear_order_by(self)\n\nClears out all ordering that has been already added, reverting the\nquery to relevancy.\n\n``add_model``\n~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_model(self, model)\n\nRestricts the query requiring matches in the given model.\n\nThis builds upon previous additions, so you can limit to multiple models\nby chaining this method several times.\n\n``set_limits``\n~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.set_limits(self, low=None, high=None)\n\nRestricts the query by altering either the start, end or both offsets.\n\n``clear_limits``\n~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.clear_limits(self)\n\nClears any existing limits.\n\n``add_boost``\n~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_boost(self, term, boost_value)\n\nAdds a boosted term and the amount to boost it to the query.\n\n``raw_search``\n~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.raw_search(self, query_string, **kwargs)\n\nRuns a raw query (no parsing) against the backend.\n\nThis method causes the ``SearchQuery`` to ignore the standard query-generating \nfacilities, running only what was provided instead.\n\nNote that any kwargs passed along will override anything provided\nto the rest of the ``SearchQuerySet``.\n\n``more_like_this``\n~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.more_like_this(self, model_instance)\n\nAllows backends with support for \"More Like This\" to return results\nsimilar to the provided instance.\n\n``add_stats_query``\n~~~~~~~~~~~~~~~~~~~\n.. method:: SearchQuery.add_stats_query(self,stats_field,stats_facets)\n\nAdds stats and stats_facets queries for the Solr backend.\n\n``add_highlight``\n~~~~~~~~~~~~~~~~~\n\n.. 
method:: SearchQuery.add_highlight(self)\n\nAdds highlighting to the search results.\n\n``add_within``\n~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_within(self, field, point_1, point_2):\n\nAdds bounding box parameters to search query.\n\n``add_dwithin``\n~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_dwithin(self, field, point, distance):\n\nAdds radius-based parameters to search query.\n\n``add_distance``\n~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_distance(self, field, point):\n\nDenotes that results should include distance measurements from the\npoint passed in.\n\n``add_field_facet``\n~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_field_facet(self, field, **options)\n\nAdds a regular facet on a field.\n\n``add_date_facet``\n~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_date_facet(self, field, start_date, end_date, gap_by, gap_amount)\n\nAdds a date-based facet on a field.\n\n``add_query_facet``\n~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_query_facet(self, field, query)\n\nAdds a query facet on a field.\n\n``add_narrow_query``\n~~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.add_narrow_query(self, query)\n\nNarrows a search to a subset of all documents per the query.\n\nGenerally used in conjunction with faceting.\n\n``set_result_class``\n~~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuery.set_result_class(self, klass)\n\nSets the result class to use for results.\n\nOverrides any previous usages. If ``None`` is provided, Haystack will\nrevert back to the default ``SearchResult`` object.\n\n``using``\n~~~~~~~~~\n\n.. method:: SearchQuery.using(self, using=None)\n\nAllows for overriding which connection should be used. This\ndisables the use of routers when performing the query.\n\nIf ``None`` is provided, it has no effect on what backend is used.\n"
  },
  {
    "path": "docs/searchqueryset_api.rst",
    "content": ".. _ref-searchqueryset-api:\n\n======================\n``SearchQuerySet`` API\n======================\n\n.. class:: SearchQuerySet(using=None, query=None)\n\nThe ``SearchQuerySet`` class is designed to make performing a search and\niterating over its results easy and consistent. For those familiar with Django's\nORM ``QuerySet``, much of the ``SearchQuerySet`` API should feel familiar.\n\n\nWhy Follow ``QuerySet``?\n========================\n\nA couple reasons to follow (at least in part) the ``QuerySet`` API:\n\n#. Consistency with Django\n#. Most Django programmers have experience with the ORM and can use this\n   knowledge with ``SearchQuerySet``.\n\nAnd from a high-level perspective, ``QuerySet`` and ``SearchQuerySet`` do very similar\nthings: given certain criteria, provide a set of results. Both are powered by\nmultiple backends, both are abstractions on top of the way a query is performed.\n\n\nQuick Start\n===========\n\nFor the impatient::\n\n    from haystack.query import SearchQuerySet\n    all_results = SearchQuerySet().all()\n    hello_results = SearchQuerySet().filter(content='hello')\n    hello_world_results = SearchQuerySet().filter(content='hello world')\n    unfriendly_results = SearchQuerySet().exclude(content='hello').filter(content='world')\n    recent_results = SearchQuerySet().order_by('-pub_date')[:5]\n\n    # Using the new input types...\n    from haystack.inputs import AutoQuery, Exact, Clean\n    sqs = SearchQuerySet().filter(content=AutoQuery(request.GET['q']), product_type=Exact('ancient book'))\n\n    if request.GET['product_url']:\n        sqs = sqs.filter(product_url=Clean(request.GET['product_url']))\n\nFor more on the ``AutoQuery``, ``Exact``, ``Clean`` classes & friends, see the\n:ref:`ref-inputtypes` documentation.\n\n\n``SearchQuerySet``\n==================\n\nBy default, ``SearchQuerySet`` provide the documented functionality. 
You can\nextend with your own behavior by simply subclassing from ``SearchQuerySet`` and\nadding what you need, then using your subclass in place of ``SearchQuerySet``.\n\nMost methods in ``SearchQuerySet`` \"chain\" in a similar fashion to ``QuerySet``.\nAdditionally, like ``QuerySet``, ``SearchQuerySet`` is lazy (meaning it evaluates the\nquery as late as possible). So the following is valid::\n\n    from haystack.query import SearchQuerySet\n    results = SearchQuerySet().exclude(content='hello').filter(content='world').order_by('-pub_date').boost('title', 0.5)[10:20]\n\n\nThe ``content`` Shortcut\n========================\n\nSearching your document fields is a very common activity. To help mitigate\npossible differences in ``SearchField`` names (and to help the backends deal\nwith search queries that inspect the main corpus), there is a special field\ncalled ``content``. You may use this in any place that other fields names would\nwork (e.g. ``filter``, ``exclude``, etc.) to indicate you simply want to\nsearch the main documents.\n\nFor example::\n\n    from haystack.query import SearchQuerySet\n\n    # This searches whatever fields were marked ``document=True``.\n    results = SearchQuerySet().exclude(content='hello')\n\nThis special pseudo-field works best with the ``exact`` lookup and may yield\nstrange or unexpected results with the other lookups.\n\n\n``SearchQuerySet`` Methods\n==========================\n\nThe primary interface to search in Haystack is through the ``SearchQuerySet``\nobject. 
It provides a clean, programmatic, portable API to the search backend.\nMany aspects are also \"chainable\", meaning you can call methods one after another, each\napplying their changes to the previous ``SearchQuerySet`` and further narrowing\nthe search.\n\nAll ``SearchQuerySet`` objects implement a list-like interface, meaning you can\nperform actions like getting the length of the results, accessing a result at an\noffset or even slicing the result list.\n\n\nMethods That Return A ``SearchQuerySet``\n----------------------------------------\n\n``all``\n~~~~~~~\n\n.. method:: SearchQuerySet.all(self):\n\nReturns all results for the query. This is largely a no-op (returns an identical\ncopy) but useful for denoting exactly what behavior is going on.\n\n``none``\n~~~~~~~~\n\n.. method:: SearchQuerySet.none(self):\n\nReturns an ``EmptySearchQuerySet`` that behaves like a ``SearchQuerySet`` but\nalways yields no results.\n\n``filter``\n~~~~~~~~~~\n\n.. method:: SearchQuerySet.filter(self, **kwargs)\n\nFilters the search by looking for (and including) certain attributes.\n\nThe lookup parameters (``**kwargs``) should follow the `Field lookups`_ below.\nIf you specify more than one pair, they will be joined in the query according to\nthe ``HAYSTACK_DEFAULT_OPERATOR`` setting (defaults to ``AND``).\n\nYou can pass it either strings or a variety of :ref:`ref-inputtypes` if you\nneed more advanced query behavior.\n\n.. warning::\n\n    Any data you pass to ``filter`` gets auto-escaped. If you need to send\n    non-escaped data, use the ``Raw`` input type (:ref:`ref-inputtypes`).\n\n    Also, if a string with one or more spaces in it is specified as the value, the\n    string will get passed along **AS IS**. 
This will mean that it will **NOT**\n    be treated as a phrase (like Haystack 1.X's behavior).\n\n    If you want to match a phrase, you should use either the ``__exact`` filter\n    type or the ``Exact`` input type (:ref:`ref-inputtypes`).\n\nExamples::\n\n    sqs = SearchQuerySet().filter(content='foo')\n\n    sqs = SearchQuerySet().filter(content='foo', pub_date__lte=datetime.date(2008, 1, 1))\n\n    # Identical to the previous example.\n    sqs = SearchQuerySet().filter(content='foo').filter(pub_date__lte=datetime.date(2008, 1, 1))\n\n    # To send unescaped data:\n    from haystack.inputs import Raw\n    sqs = SearchQuerySet().filter(title=Raw(trusted_query))\n\n    # To use auto-query behavior on a non-``document=True`` field.\n    from haystack.inputs import AutoQuery\n    sqs = SearchQuerySet().filter(title=AutoQuery(user_query))\n\n\n``exclude``\n~~~~~~~~~~~\n\n.. method:: SearchQuerySet.exclude(self, **kwargs)\n\nNarrows the search by ensuring certain attributes are not included.\n\n.. warning::\n\n    Any data you pass to ``exclude`` gets auto-escaped. If you need to send\n    non-escaped data, use the ``Raw`` input type (:ref:`ref-inputtypes`).\n\nExample::\n\n    sqs = SearchQuerySet().exclude(content='foo')\n\n\n``filter_and``\n~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.filter_and(self, **kwargs)\n\nNarrows the search by looking for (and including) certain attributes. Join\nbehavior in the query is forced to be ``AND``. Used primarily by the ``filter``\nmethod.\n\n``filter_or``\n~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.filter_or(self, **kwargs)\n\nNarrows the search by looking for (and including) certain attributes. Join\nbehavior in the query is forced to be ``OR``. Used primarily by the ``filter``\nmethod.\n\n``order_by``\n~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.order_by(self, *args)\n\nAlters the order in which the results should appear. Arguments should be strings\nthat map to the attributes/fields within the index. 
You may specify multiple\nfields by comma separating them::\n\n    SearchQuerySet().filter(content='foo').order_by('author', 'pub_date')\n\nDefault behavior is ascending order. To specify descending order, prepend the\nstring with a ``-``::\n\n    SearchQuerySet().filter(content='foo').order_by('-pub_date')\n\n.. note::\n\n    In general, ordering is locale-specific. Haystack makes no effort to try to\n    reconcile differences between characters from different languages. This\n    means that accented characters will sort closely with the same character\n    and **NOT** necessarily close to the unaccented form of the character.\n\n    If you want this kind of behavior, you should override the ``prepare_FOO``\n    methods on your ``SearchIndex`` objects to transliterate the characters\n    as you see fit.\n\n``highlight``\n~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.highlight(self)\n\nIf supported by the backend, the ``SearchResult`` objects returned will include\na highlighted version of the result::\n\n    sqs = SearchQuerySet().filter(content='foo').highlight()\n    result = sqs[0]\n    result.highlighted['text'][0] # u'Two computer scientists walk into a bar. The bartender says \"<em>Foo</em>!\".'\n\nThe default functionality of the highlighter may not suit your needs.\nYou can pass additional keyword arguments to ``highlight`` that will\nultimately be used to build the query for your backend. Depending on the\navailable arguments for your backend, you may need to pass in a dictionary\ninstead of normal keyword arguments::\n\n    # Solr defines the fields to highlight by the ``hl.fl`` param. 
If not specified, we\n    # would only get `text` back in the ``highlighted`` dict.\n    kwargs = {\n        'hl.fl': 'other_field',\n        'hl.simple.pre': '<span class=\"highlighted\">',\n        'hl.simple.post': '</span>'\n    }\n    sqs = SearchQuerySet().filter(content='foo').highlight(**kwargs)\n    result = sqs[0]\n    result.highlighted['other_field'][0] # u'Two computer scientists walk into a bar. The bartender says \"<span class=\"highlighted\">Foo</span>!\".'\n\nElasticsearch accepts keyword arguments::\n\n    # Use the ``pre_tag`` and ``post_tag`` keywords and pass the desired tags as lists.\n    sqs = SearchQuerySet().filter(content='foo').highlight(\n        pre_tags=['<strong>'], post_tags=['</strong>'])\n    result_example = \" \".join(sqs[0].highlighted)\n    # u'Two <strong>foo</strong> computer scientists walk into a bar. The bartender says \"<strong>Foo</strong>!\"'\n\n``models``\n~~~~~~~~~~\n\n.. method:: SearchQuerySet.models(self, *models)\n\nAccepts an arbitrary number of Model classes to include in the search. This will\nnarrow the search results to only include results from the models specified.\n\nExample::\n\n    SearchQuerySet().filter(content='foo').models(BlogEntry, Comment)\n\n``result_class``\n~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.result_class(self, klass)\n\nAllows specifying a different class to use for results.\n\nOverrides any previous usages. If ``None`` is provided, Haystack will\nrevert back to the default ``SearchResult`` object.\n\nExample::\n\n    SearchQuerySet().result_class(CustomResult)\n\n``boost``\n~~~~~~~~~\n\n.. method:: SearchQuerySet.boost(self, term, boost_value)\n\nBoosts a certain term of the query. You provide the term to be boosted and the\nvalue is the amount to boost it by. Boost amounts may be either an integer or a\nfloat.\n\nExample::\n\n    SearchQuerySet().filter(content='foo').boost('bar', 1.5)\n\n``facet``\n~~~~~~~~~\n\n.. 
method:: SearchQuerySet.facet(self, field, **options)\n\nAdds faceting to a query for the provided field. You provide the field (from one\nof the ``SearchIndex`` classes) you like to facet on. Any keyword options you\nprovide will be passed along to the backend for that facet.\n\nExample::\n\n    # For SOLR (setting f.author.facet.*; see http://wiki.apache.org/solr/SimpleFacetParameters#Parameters)\n    SearchQuerySet().facet('author', mincount=1, limit=10)\n    # For Elasticsearch (see http://www.elasticsearch.org/guide/reference/api/search/facets/terms-facet.html)\n    SearchQuerySet().facet('author', size=10, order='term')\n\nIn the search results you get back, facet counts will be populated in the\n``SearchResult`` object. You can access them via the ``facet_counts`` method.\n\nExample::\n\n    # Count document hits for each author within the index.\n    SearchQuerySet().filter(content='foo').facet('author')\n\n``date_facet``\n~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.date_facet(self, field, start_date, end_date, gap_by, gap_amount=1)\n\nAdds faceting to a query for the provided field by date. You provide the field\n(from one of the ``SearchIndex`` classes) you like to facet on, a ``start_date``\n(either ``datetime.datetime`` or ``datetime.date``), an ``end_date`` and the\namount of time between gaps as ``gap_by`` (one of ``'year'``, ``'month'``,\n``'day'``, ``'hour'``, ``'minute'`` or ``'second'``).\n\nYou can also optionally provide a ``gap_amount`` to specify a different\nincrement than ``1``. For example, specifying gaps by week (every seven days)\nwould be ``gap_by='day', gap_amount=7``).\n\nIn the search results you get back, facet counts will be populated in the\n``SearchResult`` object. 
You can access them via the ``facet_counts`` method.\n\nExample::\n\n    # Count document hits for each day between 2009-06-07 to 2009-07-07 within the index.\n    SearchQuerySet().filter(content='foo').date_facet('pub_date', start_date=datetime.date(2009, 6, 7), end_date=datetime.date(2009, 7, 7), gap_by='day')\n\n``query_facet``\n~~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.query_facet(self, field, query)\n\nAdds faceting to a query for the provided field with a custom query. You provide\nthe field (from one of the ``SearchIndex`` classes) you like to facet on and the\nbackend-specific query (as a string) you'd like to execute.\n\nPlease note that this is **NOT** portable between backends. The syntax is entirely\ndependent on the backend. No validation/cleansing is performed and it is up to\nthe developer to ensure the query's syntax is correct.\n\nIn the search results you get back, facet counts will be populated in the\n``SearchResult`` object. You can access them via the ``facet_counts`` method.\n\nExample::\n\n    # Count document hits for authors that start with 'jo' within the index.\n    SearchQuerySet().filter(content='foo').query_facet('author', 'jo*')\n\n``within``\n~~~~~~~~~~\n\n.. method:: SearchQuerySet.within(self, field, point_1, point_2):\n\nSpatial: Adds a bounding box search to the query.\n\nSee the :ref:`ref-spatial` docs for more information.\n\n``dwithin``\n~~~~~~~~~~~\n\n.. method:: SearchQuerySet.dwithin(self, field, point, distance):\n\nSpatial: Adds a distance-based search to the query.\n\nSee the :ref:`ref-spatial` docs for more information.\n\n``stats``\n~~~~~~~~~\n\n.. method:: SearchQuerySet.stats(self, field):\n\nAdds stats to a query for the provided field. This is supported on\nSolr only. You provide the field (from one of the ``SearchIndex``\nclasses) you would like stats on.\n\nIn the search results you get back, stats will be populated in the\n``SearchResult`` object. 
You can access them via the ``stats_results`` method.\n\nExample::\n\n    # Get stats on the author field.\n    SearchQuerySet().filter(content='foo').stats('author')\n\n``stats_facet``\n~~~~~~~~~~~~~~~\n.. method:: SearchQuerySet.stats_facet(self, field, facet_fields=None):\n\nAdds stats facet for the given field and facet_fields represents the\nfaceted fields. This is supported on Solr only.\n\nExample::\n\n    # Get stats on the author field, and stats on the author field\n    # faceted by bookstore.\n    SearchQuerySet().filter(content='foo').stats_facet('author', 'bookstore')\n\n\n``distance``\n~~~~~~~~~~~~\n.. method:: SearchQuerySet.distance(self, field, point):\n\nSpatial: Denotes results must have distance measurements from the\nprovided point.\n\nSee the :ref:`ref-spatial` docs for more information.\n\n``narrow``\n~~~~~~~~~~\n\n.. method:: SearchQuerySet.narrow(self, query)\n\nPulls a subset of documents from the search engine to search within. This is\nfor advanced usage, especially useful when faceting.\n\nExample::\n\n    # Search, from recipes containing 'blend', for recipes containing 'banana'.\n    SearchQuerySet().narrow('blend').filter(content='banana')\n\n    # Using a fielded search where the recipe's title contains 'smoothie', find all recipes published before 2009.\n    SearchQuerySet().narrow('title:smoothie').filter(pub_date__lte=datetime.datetime(2009, 1, 1))\n\nBy using ``narrow``, you can create drill-down interfaces for faceting by\napplying ``narrow`` calls for each facet that gets selected.\n\nThis method is different from ``SearchQuerySet.filter()`` in that it does not\naffect the query sent to the engine. It pre-limits the document set being\nsearched. Generally speaking, if you're in doubt of whether to use\n``filter`` or ``narrow``, use ``filter``.\n\n.. note::\n\n    This method is, generally speaking, not necessarily portable between\n    backends. 
The syntax is entirely dependent on the backend, though most\n    backends have a similar syntax for basic fielded queries. No\n    validation/cleansing is performed and it is up to the developer to ensure\n    the query's syntax is correct.\n\n``raw_search``\n~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.raw_search(self, query_string, **kwargs)\n\nPasses a raw query directly to the backend. This is for advanced usage, where\nthe desired query can not be expressed via ``SearchQuerySet``.\n\nThis method is still supported, however it now uses the much more flexible\n``Raw`` input type (:ref:`ref-inputtypes`).\n\n.. warning::\n\n    Different from Haystack 1.X, this method no longer causes immediate\n    evaluation & now chains appropriately.\n\nExample::\n\n    # In the case of Solr... (this example could be expressed with SearchQuerySet)\n    SearchQuerySet().raw_search('django_ct:blog.blogentry \"However, it is\"')\n\n    # Equivalent.\n    from haystack.inputs import Raw\n    sqs = SearchQuerySet().filter(content=Raw('django_ct:blog.blogentry \"However, it is\"'))\n\nPlease note that this is **NOT** portable between backends. The syntax is entirely\ndependent on the backend. No validation/cleansing is performed and it is up to\nthe developer to ensure the query's syntax is correct.\n\nFurther, the use of ``**kwargs`` are completely undocumented intentionally. If\na third-party backend can implement special features beyond what's present, it\nshould use those ``**kwargs`` for passing that information. Developers should\nbe careful to make sure there are no conflicts with the backend's ``search``\nmethod, as that is called directly.\n\n``load_all``\n~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.load_all(self)\n\nEfficiently populates the objects in the search results. Without using this\nmethod, DB lookups are done on a per-object basis, resulting in many individual\ntrips to the database. 
If ``load_all`` is used, the ``SearchQuerySet`` will\ngroup similar objects into a single query, resulting in only as many queries as\nthere are different object types returned.\n\nExample::\n\n    SearchQuerySet().filter(content='foo').load_all()\n\n``auto_query``\n~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.auto_query(self, query_string, fieldname=None)\n\nPerforms a best guess constructing the search query.\n\nThis method is intended for common use directly with a user's query. This\nmethod is still supported, however it now uses the much more flexible\n``AutoQuery`` input type (:ref:`ref-inputtypes`).\n\nIt handles exact matches (specified with single or double quotes), negation (\nusing a ``-`` immediately before the term) and joining remaining terms with the\noperator specified in ``HAYSTACK_DEFAULT_OPERATOR``.\n\nExample::\n\n    sqs = SearchQuerySet().auto_query('goldfish \"old one eye\" -tank')\n\n    # Equivalent.\n    from haystack.inputs import AutoQuery\n    sqs = SearchQuerySet().filter(content=AutoQuery('goldfish \"old one eye\" -tank'))\n\n    # Against a different field.\n    sqs = SearchQuerySet().filter(title=AutoQuery('goldfish \"old one eye\" -tank'))\n\n\n``autocomplete``\n~~~~~~~~~~~~~~~~\n\nA shortcut method to perform an autocomplete search.\n\nMust be run against fields that are either ``NgramField`` or\n``EdgeNgramField``.\n\nExample::\n\n    SearchQuerySet().autocomplete(title_autocomplete='gol')\n\n``more_like_this``\n~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.more_like_this(self, model_instance)\n\nFinds similar results to the object passed in.\n\nYou should pass in an instance of a model (for example, one fetched via a\n``get`` in Django's ORM). This will execute a query on the backend that searches\nfor similar results. 
The instance you pass in should be an indexed object.\nPreviously called methods will have an effect on the provided results.\n\nIt will evaluate its own backend-specific query and populate the\n``SearchQuerySet`` in the same manner as other methods.\n\nExample::\n\n    entry = Entry.objects.get(slug='haystack-one-oh-released')\n    mlt = SearchQuerySet().more_like_this(entry)\n    mlt.count() # 5\n    mlt[0].object.title # \"Haystack Beta 1 Released\"\n\n    # ...or...\n    mlt = SearchQuerySet().filter(public=True).exclude(pub_date__lte=datetime.date(2009, 7, 21)).more_like_this(entry)\n    mlt.count() # 2\n    mlt[0].object.title # \"Haystack Beta 1 Released\"\n\n``using``\n~~~~~~~~~\n\n.. method:: SearchQuerySet.using(self, connection_name)\n\nAllows switching which connection the ``SearchQuerySet`` uses to search in.\n\nExample::\n\n    # Let the routers decide which connection to use.\n    sqs = SearchQuerySet().all()\n\n    # Specify the 'default'.\n    sqs = SearchQuerySet().all().using('default')\n\n\nMethods That Do Not Return A ``SearchQuerySet``\n-----------------------------------------------\n\n``count``\n~~~~~~~~~\n\n.. method:: SearchQuerySet.count(self)\n\nReturns the total number of matching results.\n\nThis returns an integer count of the total number of results the search backend\nfound that matched. This method causes the query to evaluate and run the search.\n\nExample::\n\n    SearchQuerySet().filter(content='foo').count()\n\n``best_match``\n~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.best_match(self)\n\nReturns the best/top search result that matches the query.\n\nThis method causes the query to evaluate and run the search. This method returns\na ``SearchResult`` object that is the best match the search backend found::\n\n    foo = SearchQuerySet().filter(content='foo').best_match()\n    foo.id # Something like 5.\n\n    # Identical to:\n    foo = SearchQuerySet().filter(content='foo')[0]\n\n``latest``\n~~~~~~~~~~\n\n.. 
method:: SearchQuerySet.latest(self, date_field)\n\nReturns the most recent search result that matches the query.\n\nThis method causes the query to evaluate and run the search. This method returns\na ``SearchResult`` object that is the most recent match the search backend\nfound::\n\n    foo = SearchQuerySet().filter(content='foo').latest('pub_date')\n    foo.id # Something like 3.\n\n    # Identical to:\n    foo = SearchQuerySet().filter(content='foo').order_by('-pub_date')[0]\n\n``facet_counts``\n~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.facet_counts(self)\n\nReturns the facet counts found by the query. This will cause the query to\nexecute and should generally be used when presenting the data (template-level).\n\nYou receive back a dictionary with three keys: ``fields``, ``dates`` and\n``queries``. Each contains the facet counts for whatever facets you specified\nwithin your ``SearchQuerySet``.\n\n.. note::\n\n    The resulting dictionary may change before 1.0 release. It's fairly\n    backend-specific at the time of writing. Standardizing is waiting on\n    implementing other backends that support faceting and ensuring that the\n    results presented will meet their needs as well.\n\nExample::\n\n    # Count document hits for each author.\n    sqs = SearchQuerySet().filter(content='foo').facet('author')\n\n    sqs.facet_counts()\n    # Gives the following response:\n    # {\n    #     'dates': {},\n    #     'fields': {\n    #         'author': [\n    #             ('john', 4),\n    #             ('daniel', 2),\n    #             ('sally', 1),\n    #             ('terry', 1),\n    #         ],\n    #     },\n    #     'queries': {}\n    # }\n\n``stats_results``\n~~~~~~~~~~~~~~~~~\n\n.. 
method:: SearchQuerySet.stats_results(self):\n\nReturns the stats results found by the query.\n\nThis will cause the query to execute and should generally be used when\npresenting the data (template-level).\n\nYou receive back a dictionary with three keys: ``fields``, ``dates`` and\n``queries``. Each contains the facet counts for whatever facets you specified\nwithin your ``SearchQuerySet``.\n\n.. note::\n\n    The resulting dictionary may change before 1.0 release. It's fairly\n    backend-specific at the time of writing. Standardizing is waiting on\n    implementing other backends that support faceting and ensuring that the\n    results presented will meet their needs as well.\n\nExample::\n\n    # Get stats on the author field.\n    sqs = SearchQuerySet().filter(content='foo').stats('author')\n\n    sqs.stats_results()\n\n    # Gives the following response\n    # {\n    #    'stats_fields':{\n    #       'author':{\n    #            'min': 0.0,\n    #            'max': 2199.0,\n    #            'sum': 5251.2699999999995,\n    #            'count': 15,\n    #            'missing': 11,\n    #            'sumOfSquares': 6038619.160300001,\n    #            'mean': 350.08466666666664,\n    #            'stddev': 547.737557906113\n    #        }\n    #    }\n    #\n    # }\n\n``set_spelling_query``\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.set_spelling_query(self, spelling_query)\n\nThis method allows you to set the text which will be passed to the backend search engine for spelling\nsuggestions. 
This is helpful when the actual query being sent to the backend has complex syntax which\nshould not be seen by the spelling suggestion component.\n\nIn this example, a Solr ``edismax`` query is being used to boost field and document weights and\n``set_spelling_query`` is being used to send only the actual user-entered text to the spellchecker::\n\n    alt_q = AltParser('edismax', self.query,\n                      qf='title^4 text provider^0.5',\n                      bq='django_ct:core.item^6.0')\n    sqs = sqs.filter(content=alt_q)\n    sqs = sqs.set_spelling_query(self.query)\n\n\n``spelling_suggestion``\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.spelling_suggestion(self, preferred_query=None)\n\nReturns the spelling suggestion found by the query.\n\nTo work, you must set ``INCLUDE_SPELLING`` within your connection's\nsettings dictionary to ``True``, and you must rebuild your index afterwards.\nOtherwise, ``None`` will be returned.\n\nThis method causes the query to evaluate and run the search if it hasn't already\nrun. Search results will be populated as normal but with an additional spelling\nsuggestion. Note that this does *NOT* run the revised query, only suggests\nimprovements.\n\nIf provided, the optional argument to this method lets you specify an alternate\nquery for the spelling suggestion to be run on. This is useful for passing along\na raw user-provided query, especially when there are many methods chained on the\n``SearchQuerySet``.\n\nExample::\n\n    sqs = SearchQuerySet().auto_query('mor exmples')\n    sqs.spelling_suggestion() # u'more examples'\n\n    # ...or...\n    suggestion = SearchQuerySet().spelling_suggestion('moar exmples')\n    suggestion # u'more examples'\n\n``values``\n~~~~~~~~~~\n\n.. 
method:: SearchQuerySet.values(self, *fields)\n\nReturns a list of dictionaries, each containing the key/value pairs for the\nresult, exactly like Django's ``ValuesQuerySet``.\n\nThis method causes the query to evaluate and run the search if it hasn't already\nrun.\n\nYou must provide a list of one or more fields as arguments. These fields will\nbe the ones included in the individual results.\n\nExample::\n\n    sqs = SearchQuerySet().auto_query('banana').values('title', 'description')\n\n\n``values_list``\n~~~~~~~~~~~~~~~\n\n.. method:: SearchQuerySet.values_list(self, *fields, **kwargs)\n\nReturns a list of field values as tuples, exactly like Django's\n``ValuesListQuerySet``.\n\nThis method causes the query to evaluate and run the search if it hasn't already\nrun.\n\nYou must provide a list of one or more fields as arguments. These fields will\nbe the ones included in the individual results.\n\nYou may optionally also provide a ``flat=True`` kwarg, which in the case of a\nsingle field being provided, will return a flat list of that field rather than\na list of tuples.\n\nExample::\n\n    sqs = SearchQuerySet().auto_query('banana').values_list('title', 'description')\n\n    # ...or just the titles as a flat list...\n    sqs = SearchQuerySet().auto_query('banana').values_list('title', flat=True)\n\n\n.. _field-lookups:\n\nField Lookups\n-------------\n\nThe following lookup types are supported:\n\n* content\n* contains\n* exact\n* gt\n* gte\n* lt\n* lte\n* in\n* startswith\n* endswith\n* range\n* fuzzy\n\nExcept for ``fuzzy`` these options are similar in function to the way Django's lookup types work.\nThe actual behavior of these lookups is backend-specific.\n\n.. warning::\n\n    The ``startswith`` filter is strongly affected by the other ways the engine\n    parses data, especially in regards to stemming (see :doc:`glossary`). 
This\n    can mean that if the query ends in a vowel or a plural form, it may get\n    stemmed before being evaluated.\n\n    This is both backend-specific and yet fairly consistent between engines,\n    and may be the cause of sometimes unexpected results.\n\n.. warning::\n\n    The ``content`` filter became the new default filter as of Haystack v2.X\n    (the default in Haystack v1.X was ``exact``). This changed because ``exact``\n    caused problems and was unintuitive for new people trying to use Haystack.\n    ``content`` is a much more natural usage.\n\n    If you had an app built on Haystack v1.X & are upgrading, you'll need to\n    sanity-check & possibly change any code that was relying on the default.\n    The solution is just to add ``__exact`` to any \"bare\" field in a\n    ``.filter(...)`` clause.\n\nExample::\n\n    SearchQuerySet().filter(content='foo')\n\n    # Identical to:\n    SearchQuerySet().filter(content__content='foo')\n\n    # Phrase matching.\n    SearchQuerySet().filter(content__exact='hello world')\n\n    # Other usages look like:\n    SearchQuerySet().filter(pub_date__gte=datetime.date(2008, 1, 1), pub_date__lt=datetime.date(2009, 1, 1))\n    SearchQuerySet().filter(author__in=['daniel', 'john', 'jane'])\n    SearchQuerySet().filter(view_count__range=[3, 5])\n\n\n``EmptySearchQuerySet``\n=======================\n\nAlso included in Haystack is an ``EmptySearchQuerySet`` class. It behaves just\nlike ``SearchQuerySet`` but will always return zero results. This is useful for\nplaces where you want no query to occur or results to be returned.\n\n\n``RelatedSearchQuerySet``\n=========================\n\nSometimes you need to filter results based on relations in the database that are\nnot present in the search index or are difficult to express that way. To this\nend, ``RelatedSearchQuerySet`` allows you to post-process the search results by\ncalling ``load_all_queryset``.\n\n.. 
warning::\n\n    ``RelatedSearchQuerySet`` can have negative performance implications.\n    Because results are excluded based on the database after the search query\n    has been run, you can't guarantee offsets within the cache. Therefore, the\n    entire cache that appears before the offset you request must be filled in\n    order to produce consistent results. On large result sets and at higher\n    slices, this can take time.\n\n    This is the old behavior of ``SearchQuerySet``, so performance is no worse\n    than the early days of Haystack.\n\nIt supports all other methods that the standard ``SearchQuerySet`` does, with\nthe addition of the ``load_all_queryset`` method and paying attention to the\n``load_all_queryset`` method of ``SearchIndex`` objects when populating the\ncache.\n\n``load_all_queryset``\n---------------------\n\n.. method:: RelatedSearchQuerySet.load_all_queryset(self, model_class, queryset)\n\nAllows for specifying a custom ``QuerySet`` that changes how ``load_all`` will\nfetch records for the provided model. This is useful for post-processing the\nresults from the query, enabling things like adding ``select_related`` or\nfiltering certain data.\n\nExample::\n\n    sqs = RelatedSearchQuerySet().filter(content='foo').load_all()\n    # For the Entry model, we want to include related models directly associated\n    # with the Entry to save on DB queries.\n    sqs = sqs.load_all_queryset(Entry, Entry.objects.all().select_related(depth=1))\n\nThis method chains indefinitely, so you can specify ``QuerySets`` for as many\nmodels as you wish, one per model. The ``SearchQuerySet`` appends on a call to\n``in_bulk``, so be sure that the ``QuerySet`` you provide can accommodate this\nand that the ids passed to ``in_bulk`` will map to the model in question.\n\nIf you need to do this frequently and have one ``QuerySet`` you'd like to apply\neverywhere, you can specify this at the ``SearchIndex`` level using the\n``load_all_queryset`` method. 
See :doc:`searchindex_api` for usage.\n"
  },
  {
    "path": "docs/searchresult_api.rst",
    "content": ".. _ref-searchresult-api:\n\n====================\n``SearchResult`` API\n====================\n\n.. class:: SearchResult(app_label, model_name, pk, score, **kwargs)\n\nThe ``SearchResult`` class provides structure to the results that come back from\nthe search index. These objects are what a ``SearchQuerySet`` will return when\nevaluated.\n\n\nAttribute Reference\n===================\n\nThe class exposes the following useful attributes/properties:\n\n* ``app_label`` - The application the model is attached to.\n* ``model_name`` - The model's name.\n* ``pk`` - The primary key of the model.\n* ``score`` - The score provided by the search engine.\n* ``object`` - The actual model instance (lazy loaded).\n* ``model`` - The model class.\n* ``verbose_name`` - A prettier version of the model's class name for display.\n* ``verbose_name_plural`` -  A prettier version of the model's *plural* class name for display.\n* ``searchindex`` - Returns the ``SearchIndex`` class associated with this\n  result.\n* ``distance`` - On geo-spatial queries, this returns a ``Distance`` object\n  representing the distance the result was from the focused point.\n\n\nMethod Reference\n================\n\n``content_type``\n----------------\n\n.. method:: SearchResult.content_type(self)\n\nReturns the content type for the result's model instance.\n\n``get_additional_fields``\n-------------------------\n\n.. method:: SearchResult.get_additional_fields(self)\n\nReturns a dictionary of all of the fields from the raw result.\n\nUseful for serializing results. Only returns what was seen from the\nsearch engine, so it may have extra fields Haystack's indexes aren't\naware of.\n\n``get_stored_fields``\n---------------------\n\n.. method:: SearchResult.get_stored_fields(self)\n\nReturns a dictionary of all of the stored fields from the SearchIndex.\n\nUseful for serializing results. Only returns the fields Haystack's\nindexes are aware of as being 'stored'.\n"
  },
  {
    "path": "docs/settings.rst",
    "content": ".. _ref-settings:\n\n=================\nHaystack Settings\n=================\n\nAs a way to extend/change the default behavior within Haystack, there are\nseveral settings you can alter within your ``settings.py``. This is a\ncomprehensive list of the settings Haystack recognizes.\n\n\n``HAYSTACK_DEFAULT_OPERATOR``\n=============================\n\n**Optional**\n\nThis setting controls what the default behavior for chaining ``SearchQuerySet``\nfilters together is.\n\nValid options are::\n\n    HAYSTACK_DEFAULT_OPERATOR = 'AND'\n    HAYSTACK_DEFAULT_OPERATOR = 'OR'\n\nDefaults to ``AND``.\n\n\n``HAYSTACK_CONNECTIONS``\n========================\n\n**Required**\n\nThis setting controls which backends should be available. It should be a\ndictionary of dictionaries resembling the following (complete) example::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n            'URL': 'http://localhost:9001/solr/default',\n            'TIMEOUT': 60 * 5,\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n            'EXCLUDED_INDEXES': ['thirdpartyapp.search_indexes.BarIndex'],\n        },\n        'autocomplete': {\n            'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',\n            'PATH': '/home/search/whoosh_index',\n            'STORAGE': 'file',\n            'POST_LIMIT': 128 * 1024 * 1024,\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n            'EXCLUDED_INDEXES': ['thirdpartyapp.search_indexes.BarIndex'],\n        },\n        'slave': {\n            'ENGINE': 'xapian_backend.XapianEngine',\n            'PATH': '/home/search/xapian_index',\n            'INCLUDE_SPELLING': True,\n            'BATCH_SIZE': 100,\n            'EXCLUDED_INDEXES': ['thirdpartyapp.search_indexes.BarIndex'],\n        },\n        'db': {\n            'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n            'EXCLUDED_INDEXES': 
['thirdpartyapp.search_indexes.BarIndex'],\n        }\n    }\n\nNo default for this setting is provided.\n\nThe main keys (``default`` & friends) are identifiers for your application.\nYou can use them any place the API exposes ``using`` as a method or kwarg.\n\nThere must always be at least a ``default`` key within this setting.\n\nThe ``ENGINE`` option is required for all backends & should point to the\n``BaseEngine`` subclass for the backend.\n\nAdditionally, each backend may have additional options it requires:\n\n* Solr\n\n  * ``URL`` - The URL to the Solr core. e.g. http://localhost:9001/solr/collection1\n  * ``ADMIN_URL`` - The URL to the administrative functions. e.g.\n    http://localhost:9001/solr/admin/cores\n\n* Whoosh\n\n  * ``PATH`` - The filesystem path to where the index data is located.\n\n* Xapian\n\n  * ``PATH`` - The filesystem path to where the index data is located.\n\nThe following options are optional:\n\n* ``INCLUDE_SPELLING`` - Include spelling suggestions. Default is ``False``\n* ``BATCH_SIZE`` - How many records should be updated at once via the management\n  commands. Default is ``1000``.\n* ``TIMEOUT`` - (Solr and ElasticSearch) How long to wait (in seconds) before\n  the connection times out. Default is ``10``.\n* ``STORAGE`` - (Whoosh-only) Which storage engine to use. Accepts ``file`` or\n  ``ram``. Default is ``file``.\n* ``POST_LIMIT`` - (Whoosh-only) How large the file sizes can be. Default is\n  ``128 * 1024 * 1024``.\n* ``FLAGS`` - (Xapian-only) A list of flags to use when querying the index.\n* ``EXCLUDED_INDEXES`` - A list of strings (as Python import paths) to indexes\n  you do **NOT** want included. 
Useful for omitting third-party things you\n  don't want indexed or for when you want to replace an index.\n* ``KWARGS`` - (Solr and ElasticSearch) Any additional keyword arguments that\n  should be passed on to the underlying client library.\n\n\n``HAYSTACK_ROUTERS``\n====================\n\n**Optional**\n\nThis setting controls how routing is performed to allow different backends to\nhandle updates/deletes/reads.\n\nAn example::\n\n    HAYSTACK_ROUTERS = ['search_routers.MasterSlaveRouter', 'haystack.routers.DefaultRouter']\n\nDefaults to ``['haystack.routers.DefaultRouter']``.\n\n\n``HAYSTACK_SIGNAL_PROCESSOR``\n=============================\n\n**Optional**\n\nThis setting controls what ``SignalProcessor`` class is used to handle Django's\nsignals & keep the search index up-to-date.\n\nAn example::\n\n    HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'\n\nDefaults to ``'haystack.signals.BaseSignalProcessor'``.\n\n\n``HAYSTACK_DOCUMENT_FIELD``\n===========================\n\n**Optional**\n\nThis setting controls what fieldname Haystack relies on as the default field\nfor searching within.\n\nAn example::\n\n    HAYSTACK_DOCUMENT_FIELD = 'wall_o_text'\n\nDefaults to ``text``.\n\n\n``HAYSTACK_SEARCH_RESULTS_PER_PAGE``\n====================================\n\n**Optional**\n\nThis setting controls how many results are shown per page when using the\nincluded ``SearchView`` and its subclasses.\n\nAn example::\n\n    HAYSTACK_SEARCH_RESULTS_PER_PAGE = 50\n\nDefaults to ``20``.\n\n\n``HAYSTACK_CUSTOM_HIGHLIGHTER``\n===============================\n\n**Optional**\n\nThis setting allows you to specify your own custom ``Highlighter``\nimplementation for use with the ``{% highlight %}`` template tag. It should be\nthe full path to the class.\n\nAn example::\n\n    HAYSTACK_CUSTOM_HIGHLIGHTER = 'myapp.utils.BorkHighlighter'\n\nNo default is provided. 
Haystack automatically falls back to the default\nimplementation.\n\n\n``HAYSTACK_ITERATOR_LOAD_PER_QUERY``\n====================================\n\n**Optional**\n\nThis setting controls the number of results that are pulled at once when\niterating through a ``SearchQuerySet``. If you generally consume large portions\nat a time, you can bump this up for better performance.\n\n.. note::\n\n    This is not used in the case of a slice on a ``SearchQuerySet``, which\n    already overrides the number of results pulled at once.\n\nAn example::\n\n    HAYSTACK_ITERATOR_LOAD_PER_QUERY = 100\n\nThe default is 10 results at a time.\n\n\n``HAYSTACK_LIMIT_TO_REGISTERED_MODELS``\n=======================================\n\n**Optional**\n\nThis setting allows you to control whether or not Haystack will limit the\nsearch results seen to just the models registered. It should be a boolean.\n\nIf your search index is never used for anything other than the models\nregistered with Haystack, you can turn this off and get a small to moderate\nperformance boost.\n\nAn example::\n\n    HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False\n\nDefault is ``True``.\n\n\n``HAYSTACK_ID_FIELD``\n=====================\n\n**Optional**\n\nThis setting allows you to control what the unique field name used internally\nby Haystack is called. Rarely needed unless your field names collide with\nHaystack's defaults.\n\nAn example::\n\n    HAYSTACK_ID_FIELD = 'my_id'\n\nDefault is ``id``.\n\n\n``HAYSTACK_DJANGO_CT_FIELD``\n============================\n\n**Optional**\n\nThis setting allows you to control what the content type field name used\ninternally by Haystack is called. 
Rarely needed unless your field names\ncollide with Haystack's defaults.\n\nAn example::\n\n    HAYSTACK_DJANGO_CT_FIELD = 'my_django_ct'\n\nDefault is ``django_ct``.\n\n\n``HAYSTACK_DJANGO_ID_FIELD``\n============================\n\n**Optional**\n\nThis setting allows you to control what the primary key field name used\ninternally by Haystack is called. Rarely needed unless your field names\ncollide with Haystack's defaults.\n\nAn example::\n\n    HAYSTACK_DJANGO_ID_FIELD = 'my_django_id'\n\nDefault is ``django_id``.\n\n\n``HAYSTACK_IDENTIFIER_METHOD``\n==============================\n\n**Optional**\n\nThis setting allows you to provide a custom method for\n``haystack.utils.get_identifier``. Useful when the default identifier\npattern of <app.label>.<object_name>.<pk> isn't suited to your\nneeds.\n\nAn example::\n\n    HAYSTACK_IDENTIFIER_METHOD = 'my_app.module.get_identifier'\n\nDefault is ``haystack.utils.default_get_identifier``.\n\n\n``HAYSTACK_FUZZY_MIN_SIM``\n==========================\n\n**Optional**\n\nThis setting allows you to change the required similarity when using ``fuzzy``\nfilter.\n\nDefault is ``0.5``\n\n\n``HAYSTACK_FUZZY_MAX_EXPANSIONS``\n=================================\n\n**Optional**\n\nThis setting allows you to change the number of terms fuzzy queries will\nexpand to when using ``fuzzy`` filter.\n\nDefault is ``50``\n"
  },
  {
    "path": "docs/signal_processors.rst",
    "content": ".. _ref-signal_processors:\n\n=================\nSignal Processors\n=================\n\nKeeping data in sync between the (authoritative) database & the\n(non-authoritative) search index is one of the more difficult problems when\nusing Haystack. Even frequently running the ``update_index`` management command\nstill introduces lag between when the data is stored & when it's available\nfor searching.\n\nA solution to this is to incorporate Django's signals (specifically\n``models.db.signals.post_save`` & ``models.db.signals.post_delete``), which then\ntrigger *individual* updates to the search index, keeping them in near-perfect\nsync.\n\nOlder versions of Haystack (pre-v2.0) tied the ``SearchIndex`` directly to the\nsignals, which caused occasional conflicts of interest with third-party\napplications.\n\nTo solve this, starting with Haystack v2.0, the concept of a ``SignalProcessor``\nhas been introduced. In it's simplest form, the ``SignalProcessor`` listens\nto whatever signals are setup & can be configured to then trigger the updates\nwithout having to change any ``SearchIndex`` code.\n\n.. warning::\n\n    Incorporating Haystack's ``SignalProcessor`` into your setup **will**\n    increase the overall load (CPU & perhaps I/O depending on configuration).\n    You will need to capacity plan for this & ensure you can make the tradeoff\n    of more real-time results for increased load.\n\n\nDefault - ``BaseSignalProcessor``\n=================================\n\nThe default setup is configured to use the\n``haystack.signals.BaseSignalProcessor`` class, which includes all the\nunderlying code necessary to handle individual updates/deletes, **BUT DOES NOT\nHOOK UP THE SIGNALS**.\n\nThis means that, by default, **NO ACTION IS TAKEN BY HAYSTACK** when a model is\nsaved or deleted. 
The ``BaseSignalProcessor.setup`` &\n``BaseSignalProcessor.teardown`` methods are both empty to prevent anything\nfrom being setup at initialization time.\n\nThis usage is configured very simply (again, by default) with the\n``HAYSTACK_SIGNAL_PROCESSOR`` setting. An example of manually setting this\nwould look like::\n\n    HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.BaseSignalProcessor'\n\nThis class forms an excellent base if you'd like to override/extend for more\nadvanced behavior. Which leads us to...\n\n\nRealtime - ``RealtimeSignalProcessor``\n======================================\n\nThe other included ``SignalProcessor`` is the\n``haystack.signals.RealtimeSignalProcessor`` class. It is an extremely thin\nextension of the ``BaseSignalProcessor`` class, differing only in that\nit implements the ``setup/teardown`` methods, tying **ANY** Model\n``save/delete`` to the signal processor.\n\nIf the model has an associated ``SearchIndex``, the ``RealtimeSignalProcessor``\nwill then trigger an update/delete of that model instance within the search\nindex proper.\n\nConfiguration looks like::\n\n    HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'\n\nThis causes **all** ``SearchIndex`` classes to work in a realtime fashion.\n\n.. note::\n\n    These updates happen in-process, which if a request-response cycle is\n    involved, may cause the user with the browser to sit & wait for indexing to\n    be completed. Since this wait can be undesirable, especially under load,\n    you may wish to look into queued search options. See the\n    :ref:`ref-other_apps` documentation for existing options.\n\n\nCustom ``SignalProcessors``\n===========================\n\nThe ``BaseSignalProcessor`` & ``RealtimeSignalProcessor`` classes are fairly\nsimple/straightforward to customize or extend. 
Rather than forking Haystack to\nimplement your modifications, you should create your own subclass within your\ncodebase (anywhere that's importable is usually fine, though you should avoid\n``models.py`` files).\n\nFor instance, if you only wanted ``User`` saves to be realtime, deferring all\nother updates to the management commands, you'd implement the following code::\n\n    from django.contrib.auth.models import User\n    from django.db import models\n    from haystack import signals\n\n\n    class UserOnlySignalProcessor(signals.BaseSignalProcessor):\n        def setup(self):\n            # Listen only to the ``User`` model.\n            models.signals.post_save.connect(self.handle_save, sender=User)\n            models.signals.post_delete.connect(self.handle_delete, sender=User)\n\n        def teardown(self):\n            # Disconnect only for the ``User`` model.\n            models.signals.post_save.disconnect(self.handle_save, sender=User)\n            models.signals.post_delete.disconnect(self.handle_delete, sender=User)\n\nFor other customizations (modifying how saves/deletes should work), you'll need\nto override/extend the ``handle_save/handle_delete`` methods. The source code\nis your best option for referring to how things currently work on your version\nof Haystack.\n"
  },
  {
    "path": "docs/spatial.rst",
    "content": ".. _ref-spatial:\n\n==============\nSpatial Search\n==============\n\nSpatial search (also called geospatial search) allows you to take data that\nhas a geographic location & enhance the search results by limiting them to a\nphysical area. Haystack, combined with the latest versions of a couple engines,\ncan provide this type of search.\n\nIn addition, Haystack tries to implement these features in a way that is as\nclose to GeoDjango_ as possible. There are some differences, which we'll\nhighlight throughout this guide. Additionally, while the support isn't as\ncomprehensive as PostGIS (for example), it is still quite useful.\n\n.. _GeoDjango: https://docs.djangoproject.com/en/1.11/ref/contrib/gis/\n\n\nAdditional Requirements\n=======================\n\nThe spatial functionality has only one non-included, non-available-in-Django\ndependency:\n\n* ``geopy`` - ``pip install geopy``\n\nIf you do not ever need distance information, you may be able to skip\ninstalling ``geopy``.\n\n\nSupport\n=======\n\nYou need the latest & greatest of either Solr or Elasticsearch. None of the\nother backends (specifically the engines) support this kind of search.\n\nFor Solr_, you'll need at least **v3.5+**. In addition, if you have an existing\ninstall of Haystack & Solr, you'll need to upgrade the schema & reindex your\ndata. If you're adding geospatial data, you would have to reindex anyhow.\n\nFor Elasticsearch, you'll need at least v0.17.7, preferably v0.18.6 or better.\nIf you're adding geospatial data, you'll have to reindex as well.\n\n.. 
_Solr: http://lucene.apache.org/solr/\n\n====================== ====== =============== ======== ======== ======\nLookup Type            Solr   Elasticsearch   Whoosh   Xapian   Simple\n====================== ====== =============== ======== ======== ======\n`within`               X      X\n`dwithin`              X      X\n`distance`             X      X\n`order_by('distance')` X      X\n`polygon`                     X\n====================== ====== =============== ======== ======== ======\n\nFor more details, you can inspect http://wiki.apache.org/solr/SpatialSearch\nor http://www.elasticsearch.org/guide/reference/query-dsl/geo-bounding-box-filter.html.\n\n\nGeospatial Assumptions\n======================\n\n``Points``\n----------\n\nHaystack prefers to work with ``Point`` objects, which are located in\n``django.contrib.gis.geos.Point``.\n\n``Point`` objects use **LONGITUDE, LATITUDE** for their construction, regardless\nif you use the parameters to instantiate them or WKT_/``GEOSGeometry``.\n\n.. _WKT: http://en.wikipedia.org/wiki/Well-known_text\n\nExamples::\n\n    # Using positional arguments.\n    from django.contrib.gis.geos import Point\n    pnt = Point(-95.23592948913574, 38.97127105172941)\n\n    # Using WKT.\n    from django.contrib.gis.geos import GEOSGeometry\n    pnt = GEOSGeometry('POINT(-95.23592948913574 38.97127105172941)')\n\nThey are preferred over just providing ``latitude, longitude`` because they are\nmore intelligent, have a spatial reference system attached & are more consistent\nwith GeoDjango's use.\n\n\n``Distance``\n------------\n\nHaystack also uses the ``D`` (or ``Distance``) objects from GeoDjango,\nimplemented in ``django.contrib.gis.measure.Distance``.\n\n``Distance`` objects accept a very flexible set of measurements during\ninstantiation and can convert amongst them freely. 
This is important, because\nthe engines rely on measurements being in kilometers but you're free to use\nwhatever units you want.\n\nExamples::\n\n    from django.contrib.gis.measure import D\n\n    # Start at 5 miles.\n    imperial_d = D(mi=5)\n\n    # Convert to fathoms...\n    fathom_d = imperial_d.fathom\n\n    # Now to kilometers...\n    km_d = imperial_d.km\n\n    # And back to miles.\n    mi = imperial_d.mi\n\nThey are preferred over just providing a raw distance because they are\nmore intelligent, have a well-defined unit system attached & are consistent\nwith GeoDjango's use.\n\n\n``WGS-84``\n----------\n\nAll engines assume WGS-84 (SRID 4326). At the time of writing, there does **not**\nappear to be a way to switch this. Haystack will transform all points into this\ncoordinate system for you.\n\n\nIndexing\n========\n\nIndexing is relatively simple. Simply add a ``LocationField`` (or several)\nonto your ``SearchIndex`` class(es) & provide them a ``Point`` object. For\nexample::\n\n    from haystack import indexes\n    from shops.models import Shop\n\n\n    class ShopIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        # ... the usual, then...\n        location = indexes.LocationField(model_attr='coordinates')\n\n        def get_model(self):\n            return Shop\n\nIf you must manually prepare the data, you have to do something slightly less\nconvenient, returning a string-ified version of the coordinates in WGS-84 as\n``lat,long``::\n\n    from haystack import indexes\n    from shops.models import Shop\n\n\n    class ShopIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        # ... 
the usual, then...\n        location = indexes.LocationField()\n\n        def get_model(self):\n            return Shop\n\n        def prepare_location(self, obj):\n            # If you're just storing the floats...\n            return \"%s,%s\" % (obj.latitude, obj.longitude)\n\nAlternatively, you could build a method/property onto the ``Shop`` model that\nreturns a ``Point`` based on those coordinates::\n\n    # shops/models.py\n    from django.contrib.gis.geos import Point\n    from django.db import models\n\n\n    class Shop(models.Model):\n        # ... the usual, then...\n        latitude = models.FloatField()\n        longitude = models.FloatField()\n\n        # Usual methods, then...\n        def get_location(self):\n            # Remember, longitude FIRST!\n            return Point(self.longitude, self.latitude)\n\n\n    # shops/search_indexes.py\n    from haystack import indexes\n    from shops.models import Shop\n\n\n    class ShopIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        location = indexes.LocationField(model_attr='get_location')\n\n        def get_model(self):\n            return Shop\n\n\nQuerying\n========\n\nThere are two types of geospatial queries you can run, ``within`` & ``dwithin``.\nLike their GeoDjango counterparts (within_ & dwithin_), these methods focus on\nfinding results within an area.\n\n.. _within: https://docs.djangoproject.com/en/dev/ref/contrib/gis/geoquerysets/#within\n.. _dwithin: https://docs.djangoproject.com/en/dev/ref/contrib/gis/geoquerysets/#dwithin\n\n\n``within``\n----------\n\n.. method:: SearchQuerySet.within(self, field, point_1, point_2)\n\n``within`` is a bounding box comparison. A bounding box is a rectangular area\nwithin which to search. It's composed of a bottom-left point & a top-right\npoint. 
It is faster but slightly sloppier than its counterpart.\n\nExamples::\n\n    from haystack.query import SearchQuerySet\n    from django.contrib.gis.geos import Point\n\n    downtown_bottom_left = Point(-95.23947, 38.9637903)\n    downtown_top_right = Point(-95.23362278938293, 38.973081081164715)\n\n    # 'location' is the fieldname from our ``SearchIndex``...\n\n    # Do the bounding box query.\n    sqs = SearchQuerySet().within('location', downtown_bottom_left, downtown_top_right)\n\n    # Can be chained with other Haystack calls.\n    sqs = SearchQuerySet().auto_query('coffee').within('location', downtown_bottom_left, downtown_top_right).order_by('-popularity')\n\n.. note::\n\n    In GeoDjango, assuming the ``Shop`` model had been properly geo-ified, this\n    would have been implemented as::\n\n        from shops.models import Shop\n        Shop.objects.filter(location__within=(downtown_bottom_left, downtown_top_right))\n\n    Haystack's form differs because it yielded a cleaner implementation, was\n    no more typing than the GeoDjango version & tried to maintain the same\n    terminology/similar signature.\n\n\n``dwithin``\n-----------\n\n.. method:: SearchQuerySet.dwithin(self, field, point, distance)\n\n``dwithin`` is a radius-based search. A radius-based search is a circular area\nwithin which to search. It's composed of a center point & a radius (in\nkilometers, though Haystack will use the ``D`` object's conversion utilities to\nget it there). 
It is slower than ``within`` but very exact & can involve fewer\ncalculations on your part.\n\nExamples::\n\n    from haystack.query import SearchQuerySet\n    from django.contrib.gis.geos import Point, D\n\n    ninth_and_mass = Point(-95.23592948913574, 38.96753407043678)\n    # Within two miles.\n    max_dist = D(mi=2)\n\n    # 'location' is the fieldname from our ``SearchIndex``...\n\n    # Do the radius query.\n    sqs = SearchQuerySet().dwithin('location', ninth_and_mass, max_dist)\n\n    # Can be chained with other Haystack calls.\n    sqs = SearchQuerySet().auto_query('coffee').dwithin('location', ninth_and_mass, max_dist).order_by('-popularity')\n\n.. note::\n\n    In GeoDjango, assuming the ``Shop`` model had been properly geo-ified, this\n    would have been implemented as::\n\n        from shops.models import Shop\n        Shop.objects.filter(location__dwithin=(ninth_and_mass, D(mi=2)))\n\n    Haystack's form differs because it yielded a cleaner implementation, was\n    no more typing than the GeoDjango version & tried to maintain the same\n    terminology/similar signature.\n\n\n``distance``\n------------\n\n.. method:: SearchQuerySet.distance(self, field, point)\n\nBy default, search results will come back without distance information attached\nto them. In the concept of a bounding box, it would be ambiguous what the\ndistances would be calculated against. 
And it is more calculation that may not\nbe necessary.\n\nSo like GeoDjango, Haystack exposes a method to signify that you want to\ninclude these calculated distances on results.\n\nExamples::\n\n    from haystack.query import SearchQuerySet\n    from django.contrib.gis.geos import Point, D\n\n    ninth_and_mass = Point(-95.23592948913574, 38.96753407043678)\n\n    # On a bounding box...\n    downtown_bottom_left = Point(-95.23947, 38.9637903)\n    downtown_top_right = Point(-95.23362278938293, 38.973081081164715)\n\n    sqs = SearchQuerySet().within('location', downtown_bottom_left, downtown_top_right).distance('location', ninth_and_mass)\n\n    # ...Or on a radius query.\n    sqs = SearchQuerySet().dwithin('location', ninth_and_mass, D(mi=2)).distance('location', ninth_and_mass)\n\nYou can even apply a different field, for instance if you calculate results of\nkey, well-cached hotspots in town but want distances from the user's current\nposition::\n\n    from haystack.query import SearchQuerySet\n    from django.contrib.gis.geos import Point, D\n\n    ninth_and_mass = Point(-95.23592948913574, 38.96753407043678)\n    user_loc = Point(-95.23455619812012, 38.97240128290697)\n\n    sqs = SearchQuerySet().dwithin('location', ninth_and_mass, D(mi=2)).distance('location', user_loc)\n\n.. note::\n\n    The astute will notice this is Haystack's biggest departure from GeoDjango.\n    In GeoDjango, this would have been implemented as::\n\n        from shops.models import Shop\n        Shop.objects.filter(location__dwithin=(ninth_and_mass, D(mi=2))).distance(user_loc)\n\n    Note that, by default, the GeoDjango form leaves *out* the field to be\n    calculating against (though it's possible to override it & specify the\n    field).\n\n    Haystack's form differs because the same assumptions are difficult to make.\n    GeoDjango deals with a single model at a time, where Haystack deals with\n    a broad mix of models. 
Additionally, accessing ``Model`` information is a\n    couple hops away, so Haystack favors the explicit (if slightly more typing)\n    approach.\n\n\nOrdering\n========\n\nBecause you're dealing with search, even with geospatial queries, results still\ncome back in **RELEVANCE** order. If you want to offer the user ordering\nresults by distance, there's a simple way to enable this ordering.\n\nUsing the standard Haystack ``order_by`` method, if you specify ``distance`` or\n``-distance`` **ONLY**, you'll get geographic ordering. Additionally, you must\nhave a call to ``.distance()`` somewhere in the chain, otherwise there is no\ndistance information on the results & nothing to sort by.\n\nExamples::\n\n    from haystack.query import SearchQuerySet\n    from django.contrib.gis.geos import Point, D\n\n    ninth_and_mass = Point(-95.23592948913574, 38.96753407043678)\n    downtown_bottom_left = Point(-95.23947, 38.9637903)\n    downtown_top_right = Point(-95.23362278938293, 38.973081081164715)\n\n    # Non-geo ordering.\n    sqs = SearchQuerySet().within('location', downtown_bottom_left, downtown_top_right).order_by('title')\n    sqs = SearchQuerySet().within('location', downtown_bottom_left, downtown_top_right).distance('location', ninth_and_mass).order_by('-created')\n\n    # Geo ordering, closest to farthest.\n    sqs = SearchQuerySet().within('location', downtown_bottom_left, downtown_top_right).distance('location', ninth_and_mass).order_by('distance')\n    # Geo ordering, farthest to closest.\n    sqs = SearchQuerySet().dwithin('location', ninth_and_mass, D(mi=2)).distance('location', ninth_and_mass).order_by('-distance')\n\n.. note::\n\n    This call is identical to the GeoDjango usage.\n\n.. warning::\n\n    You can not specify both a distance & lexicographic ordering. If you specify\n    more than just ``distance`` or ``-distance``, Haystack assumes ``distance``\n    is a field in the index & tries to sort on it. 
Example::\n\n        # May blow up!\n        sqs = SearchQuerySet().dwithin('location', ninth_and_mass, D(mi=2)).distance('location', ninth_and_mass).order_by('distance', 'title')\n\n    This is a limitation in the engine's implementation.\n\n    If you actually **have** a field called ``distance`` (& aren't using\n    calculated distance information), Haystack will do the right thing in\n    these circumstances.\n\n\nCaveats\n=======\n\nIn all cases, you may call the ``within/dwithin/distance`` methods as many times\nas you like. However, the **LAST** call is the information that will be used.\nNo combination logic is available, as this is largely a backend limitation.\n\nCombining calls to both ``within`` & ``dwithin`` may yield unexpected or broken\nresults. They don't overlap when performing queries, so it may be possible to\nconstruct queries that work. Your Mileage May Vary.\n"
  },
  {
    "path": "docs/templatetags.rst",
    "content": ".. _ref-templatetags:\n\n=============\nTemplate Tags\n=============\n\nHaystack comes with a couple common template tags to make using some of its\nspecial features available to templates.\n\n\n``highlight``\n=============\n\nTakes a block of text and highlights words from a provided query within that\nblock of text. Optionally accepts arguments to provide the HTML tag to wrap \nhighlighted word in, a CSS class to use with the tag and a maximum length of\nthe blurb in characters.\n\nThe defaults are ``span`` for the HTML tag, ``highlighted`` for the CSS class\nand 200 characters for the excerpt.\n\nSyntax::\n\n    {% highlight <text_block> with <query> [css_class \"class_name\"] [html_tag \"span\"] [max_length 200] %}\n\nExample::\n\n    # Highlight summary with default behavior.\n    {% highlight result.summary with query %}\n    \n    # Highlight summary but wrap highlighted words with a div and the\n    # following CSS class.\n    {% highlight result.summary with query html_tag \"div\" css_class \"highlight_me_please\" %}\n    \n    # Highlight summary but only show 40 words.\n    {% highlight result.summary with query max_length 40 %}\n\nThe highlighter used by this tag can be overridden as needed. See the\n:doc:`highlighting` documentation for more information.\n\n\n``more_like_this``\n==================\n\nFetches similar items from the search index to find content that is similar\nto the provided model's content.\n\n.. note::\n\n    This requires a backend that has More Like This built-in.\n\nSyntax::\n\n    {% more_like_this model_instance as varname [for app_label.model_name,app_label.model_name,...] 
[limit n] %}\n\nExample::\n\n    # Pull a full SearchQuerySet (lazy loaded) of similar content.\n    {% more_like_this entry as related_content %}\n    \n    # Pull just the top 5 similar pieces of content.\n    {% more_like_this entry as related_content limit 5  %}\n    \n    # Pull just the top 5 similar entries or comments.\n    {% more_like_this entry as related_content for \"blog.entry,comments.comment\" limit 5  %}\n\nThis tag behaves exactly like ``SearchQuerySet.more_like_this``, so all notes in\nthat regard apply here as well.\n"
  },
  {
    "path": "docs/toc.rst",
    "content": "Table Of Contents\n=================\n\n.. toctree::\n   :maxdepth: 2\n\n   index\n   tutorial\n   glossary\n   views_and_forms\n   templatetags\n   management_commands\n   architecture_overview\n   backend_support\n   installing_search_engines\n   settings\n   faq\n   who_uses\n   other_apps\n   debugging\n\n   migration_from_1_to_2\n   python3\n   contributing\n\n   best_practices\n   highlighting\n   faceting\n   autocomplete\n   boost\n   signal_processors\n   multiple_index\n   rich_content_extraction\n   spatial\n\n   searchqueryset_api\n   searchindex_api\n   inputtypes\n   searchfield_api\n   searchresult_api\n   searchquery_api\n   searchbackend_api\n\n   running_tests\n   creating_new_backends\n   utils\n\n\nIndices and tables\n==================\n\n* :ref:`search`\n\n"
  },
  {
    "path": "docs/tutorial.rst",
    "content": ".. _ref-tutorial:\n\n=============================\nGetting Started with Haystack\n=============================\n\nSearch is a topic of ever increasing importance. Users increasingly rely on search\nto separate signal from noise and find what they're looking for quickly. In\naddition, search can provide insight into what things are popular (many\nsearches), what things are difficult to find on the site and ways you can\nimprove the site.\n\nTo this end, Haystack tries to make integrating custom search as easy as\npossible while being flexible/powerful enough to handle more advanced use cases.\n\nHaystack is a reusable app (that is, it relies only on its own code and focuses\non providing just search) that plays nicely with both apps you control as well as\nthird-party apps (such as ``django.contrib.*``) without having to modify the\nsources.\n\nHaystack also does pluggable backends (much like Django's database\nlayer), so virtually all of the code you write ought to be portable between\nwhichever search engine you choose.\n\n.. note::\n\n    If you hit a stumbling block, there is both a `mailing list`_ and\n    `#haystack on irc.freenode.net`_ to get help.\n\n.. note::\n\n   You can participate in and/or track the development of Haystack by\n   subscribing to the `development mailing list`_.\n\n.. _mailing list: http://groups.google.com/group/django-haystack\n.. _#haystack on irc.freenode.net: irc://irc.freenode.net/haystack\n.. _development mailing list: http://groups.google.com/group/django-haystack-dev\n\nThis tutorial assumes that you have a basic familiarity with the various major\nparts of Django (models/forms/views/settings/URLconfs) and is tailored to the\ntypical use case. There are shortcuts available as well as hooks for much\nmore advanced setups, but those will not be covered here.\n\nFor example purposes, we'll be adding search functionality to a simple\nnote-taking application. 
Here is ``myapp/models.py``::\n\n    from django.db import models\n    from django.contrib.auth.models import User\n\n\n    class Note(models.Model):\n        user = models.ForeignKey(User)\n        pub_date = models.DateTimeField()\n        title = models.CharField(max_length=200)\n        body = models.TextField()\n\n        def __unicode__(self):\n            return self.title\n\nFinally, before starting with Haystack, you will want to choose a search\nbackend to get started. There is a quick-start guide to\n:doc:`installing_search_engines`, though you may want to defer to each engine's\nofficial instructions.\n\n\nInstallation\n=============\n\nUse your favorite Python package manager to install the app from PyPI, e.g.\n\nExample::\n\n    pip install django-haystack\n\nWhen using elasticsearch, use::\n\n    pip install \"django-haystack[elasticsearch]\"\n\nConfiguration\n=============\n\nAdd Haystack To ``INSTALLED_APPS``\n----------------------------------\n\nAs with most Django applications, you should add Haystack to the\n``INSTALLED_APPS`` within your settings file (usually ``settings.py``).\n\nExample::\n\n    INSTALLED_APPS = [\n        'django.contrib.admin',\n        'django.contrib.auth',\n        'django.contrib.contenttypes',\n        'django.contrib.sessions',\n        'django.contrib.sites',\n\n        # Added.\n        'haystack',\n\n        # Then your usual apps...\n        'blog',\n    ]\n\n\nModify Your ``settings.py``\n---------------------------\n\nWithin your ``settings.py``, you'll need to add a setting to indicate where your\nsite configuration file will live and which backend to use, as well as other\nsettings for that backend.\n\n``HAYSTACK_CONNECTIONS`` is a required setting and should be at least one of\nthe following:\n\nSolr\n~~~~\n\nExample (Solr 4.X)::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n            'URL': 'http://127.0.0.1:8983/solr'\n          
  # ...or for multicore...\n            # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n        },\n    }\n\nExample (Solr 6.X)::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n            'URL': 'http://127.0.0.1:8983/solr/tester',                 # Assuming you created a core named 'tester' as described in installing search engines.\n            'ADMIN_URL': 'http://127.0.0.1:8983/solr/admin/cores'\n            # ...or for multicore...\n            # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n        },\n    }\n\nElasticsearch\n~~~~~~~~~~~~~\n\nExample (ElasticSearch 1.x)::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',\n            'URL': 'http://127.0.0.1:9200/',\n            'INDEX_NAME': 'haystack',\n        },\n    }\n\nExample (ElasticSearch 2.x)::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.elasticsearch2_backend.Elasticsearch2SearchEngine',\n            'URL': 'http://127.0.0.1:9200/',\n            'INDEX_NAME': 'haystack',\n        },\n    }\n    \nExample (ElasticSearch 5.x)::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.elasticsearch5_backend.Elasticsearch5SearchEngine',\n            'URL': 'http://127.0.0.1:9200/',\n            'INDEX_NAME': 'haystack',\n        },\n    }\n\nWhoosh\n~~~~~~\n\nRequires setting ``PATH`` to the place on your filesystem where the\nWhoosh index should be located. 
Standard warnings about permissions and keeping\nit out of a place your webserver may serve documents out of apply.\n\nExample::\n\n    import os\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',\n            'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),\n        },\n    }\n\n\nXapian\n~~~~~~\n\nFirst, install the Xapian backend (via\nhttp://github.com/notanumber/xapian-haystack/tree/master) per the instructions\nincluded with the backend.\n\nRequires setting ``PATH`` to the place on your filesystem where the\nXapian index should be located. Standard warnings about permissions and keeping\nit out of a place your webserver may serve documents out of apply.\n\nExample::\n\n    import os\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'xapian_backend.XapianEngine',\n            'PATH': os.path.join(os.path.dirname(__file__), 'xapian_index'),\n        },\n    }\n\n\nSimple\n~~~~~~\n\nThe ``simple`` backend uses very basic matching via the database itself. It's\nnot recommended for production use but it will return results.\n\n.. warning::\n\n    This backend does *NOT* work like the other backends do. Data preparation\n    does nothing & advanced filtering calls do not work. You really probably\n    don't want this unless you're in an environment where you just want to\n    silence Haystack.\n\nExample::\n\n    HAYSTACK_CONNECTIONS = {\n        'default': {\n            'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n        },\n    }\n\n\nHandling Data\n=============\n\nCreating ``SearchIndexes``\n--------------------------\n\n``SearchIndex`` objects are the way Haystack determines what data should be\nplaced in the search index and handles the flow of data in. 
You can think of\nthem as being similar to Django ``Models`` or ``Forms`` in that they are\nfield-based and manipulate/store data.\n\nYou generally create a unique ``SearchIndex`` for each type of ``Model`` you\nwish to index, though you can reuse the same ``SearchIndex`` between different\nmodels if you take care in doing so and your field names are very standardized.\n\nTo build a ``SearchIndex``, all that's necessary is to subclass both\n``indexes.SearchIndex`` & ``indexes.Indexable``,\ndefine the fields you want to store data with and define a ``get_model`` method.\n\nWe'll create the following ``NoteIndex`` to correspond to our ``Note``\nmodel. This code generally goes in a ``search_indexes.py`` file within the app\nit applies to, though that is not required. This allows\nHaystack to automatically pick it up. The ``NoteIndex`` should look like::\n\n    import datetime\n    from haystack import indexes\n    from myapp.models import Note\n\n\n    class NoteIndex(indexes.SearchIndex, indexes.Indexable):\n        text = indexes.CharField(document=True, use_template=True)\n        author = indexes.CharField(model_attr='user')\n        pub_date = indexes.DateTimeField(model_attr='pub_date')\n\n        def get_model(self):\n            return Note\n\n        def index_queryset(self, using=None):\n            \"\"\"Used when the entire index for model is updated.\"\"\"\n            return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())\n\nEvery ``SearchIndex`` requires there be one (and only one) field with\n``document=True``. This indicates to both Haystack and the search engine about\nwhich field is the primary field for searching within.\n\n.. 
warning::\n\n    When you choose a ``document=True`` field, it should be consistently named\n    across all of your ``SearchIndex`` classes to avoid confusing the backend.\n    The convention is to name this field ``text``.\n\n    There is nothing special about the ``text`` field name used in all of the\n    examples. It could be anything; you could call it ``pink_polka_dot`` and\n    it won't matter. It's simply a convention to call it ``text``.\n    \n    To use a document field with a name other than ``text``, be sure to configure\n    the ``HAYSTACK_DOCUMENT_FIELD`` setting. For example,::\n    \n        HAYSTACK_DOCUMENT_FIELD = 'pink_polka_dot'\n\nAdditionally, we're providing ``use_template=True`` on the ``text`` field. This\nallows us to use a data template (rather than error-prone concatenation) to\nbuild the document the search engine will index. You’ll need to\ncreate a new template inside your template directory called\n``search/indexes/myapp/note_text.txt`` and place the following inside::\n\n    {{ object.title }}\n    {{ object.user.get_full_name }}\n    {{ object.body }}\n\nIn addition, we added several other fields (``author`` and ``pub_date``). These\nare useful when you want to provide additional filtering options. Haystack comes\nwith a variety of ``SearchField`` classes to handle most types of data.\n\nA common theme is to allow admin users to add future content but have it not\ndisplay on the site until that future date is reached. We specify a custom\n``index_queryset`` method to prevent those future items from being indexed.\n\n.. _Django admin site: http://docs.djangoproject.com/en/dev/ref/contrib/admin/\n\n\nSetting Up The Views\n====================\n\nAdd The ``SearchView`` To Your URLconf\n--------------------------------------\n\nWithin your URLconf, add the following line::\n\n    url(r'^search/', include('haystack.urls')),\n\nThis will pull in the default URLconf for Haystack. 
It consists of a single\nURLconf that points to a ``SearchView`` instance. You can change this class's\nbehavior by passing it any of several keyword arguments or override it entirely\nwith your own view.\n\n\nSearch Template\n---------------\n\nYour search template (``search/search.html`` for the default case) will likely\nbe very simple. The following is enough to get going (your template/block names\nwill likely differ)::\n\n    {% extends 'base.html' %}\n\n    {% block content %}\n        <h2>Search</h2>\n\n        <form method=\"get\" action=\".\">\n            <table>\n                {{ form.as_table }}\n                <tr>\n                    <td>&nbsp;</td>\n                    <td>\n                        <input type=\"submit\" value=\"Search\">\n                    </td>\n                </tr>\n            </table>\n\n            {% if query %}\n                <h3>Results</h3>\n\n                {% for result in page.object_list %}\n                    <p>\n                        <a href=\"{{ result.object.get_absolute_url }}\">{{ result.object.title }}</a>\n                    </p>\n                {% empty %}\n                    <p>No results found.</p>\n                {% endfor %}\n\n                {% if page.has_previous or page.has_next %}\n                    <div>\n                        {% if page.has_previous %}<a href=\"?q={{ query }}&amp;page={{ page.previous_page_number }}\">{% endif %}&laquo; Previous{% if page.has_previous %}</a>{% endif %}\n                        |\n                        {% if page.has_next %}<a href=\"?q={{ query }}&amp;page={{ page.next_page_number }}\">{% endif %}Next &raquo;{% if page.has_next %}</a>{% endif %}\n                    </div>\n                {% endif %}\n            {% else %}\n                {# Show some example queries to run, maybe query syntax, something else? 
#}\n            {% endif %}\n        </form>\n    {% endblock %}\n\nNote that the ``page.object_list`` is actually a list of ``SearchResult``\nobjects instead of individual models. These objects have all the data returned\nfrom that record within the search index as well as score. They can also\ndirectly access the model for the result via ``{{ result.object }}``. So the\n``{{ result.object.title }}`` uses the actual ``Note`` object in the database\nand accesses its ``title`` field.\n\n\nReindex\n-------\n\nThe final step, now that you have everything setup, is to put your data in\nfrom your database into the search index. Haystack ships with a management\ncommand to make this process easy.\n\n.. note::\n\n    If you're using the Solr backend, you have an extra step. Solr's\n    configuration is XML-based, so you'll need to manually regenerate the\n    schema. You should run\n    ``./manage.py build_solr_schema`` first, drop the XML output in your\n    Solr's ``schema.xml`` file and restart your Solr server.\n\nSimply run ``./manage.py rebuild_index``. You'll get some totals of how many\nmodels were processed and placed in the index.\n\n.. note::\n\n    Using the standard ``SearchIndex``, your search index content is only\n    updated whenever you run either ``./manage.py update_index`` or start\n    afresh with ``./manage.py rebuild_index``.\n\n    You should cron up a ``./manage.py update_index`` job at whatever interval\n    works best for your site (using ``--age=<num_hours>`` reduces the number of\n    things to update).\n\n    Alternatively, if you have low traffic and/or your search engine can handle\n    it, the ``RealtimeSignalProcessor`` automatically handles updates/deletes\n    for you.\n\n\nComplete!\n=========\n\nYou can now visit the search section of your site, enter a search query and\nreceive search results back for the query! Congratulations!\n\n\nWhat's Next?\n============\n\nThis tutorial just scratches the surface of what Haystack provides. 
The\n``SearchQuerySet`` is the underpinning of all search in Haystack and provides\na powerful, ``QuerySet``-like API (see :ref:`ref-searchqueryset-api`). You can\nuse much more complicated ``SearchForms``/``SearchViews`` to give users a better\nUI (see :ref:`ref-views-and_forms`). And the :ref:`ref-best-practices` provides\ninsight into non-obvious or advanced usages of Haystack.\n"
  },
  {
    "path": "docs/utils.rst",
    "content": ".. _ref-utils:\n\n=========\nUtilities\n=========\n\nIncluded here are some of the general use bits included with Haystack.\n\n\n``get_identifier``\n------------------\n\n.. function:: get_identifier(obj_or_string)\n\nGets an unique identifier for the object or a string representing the\nobject.\n\nIf not overridden, uses ``<app_label>.<object_name>.<pk>``.\n"
  },
  {
    "path": "docs/views_and_forms.rst",
    "content": ".. _ref-views-and_forms:\n\n=============\nViews & Forms\n=============\n\n.. note::\n\n    As of version 2.4 the views in ``haystack.views.SearchView`` are deprecated in\n    favor of the new generic views in ``haystack.generic_views.SearchView``\n    which use the standard Django `class-based views`_ which are available in\n    every version of Django which is supported by Haystack.\n\n.. _class-based views: https://docs.djangoproject.com/en/1.7/topics/class-based-views/\n\nHaystack comes with some default, simple views & forms as well as some\ndjango-style views to help you get started and to cover the common cases.\nIncluded is a way to provide:\n\n  * Basic, query-only search.\n  * Search by models.\n  * Search with basic highlighted results.\n  * Faceted search.\n  * Search by models with basic highlighted results.\n\nMost processing is done by the forms provided by Haystack via the ``search``\nmethod. As a result, all but the faceted types (see :doc:`faceting`) use the\nstandard ``SearchView``.\n\nThere is very little coupling between the forms & the views (other than relying\non the existence of a ``search`` method on the form), so you may interchangeably\nuse forms and/or views anywhere within your own code.\n\nForms\n=====\n\n.. currentmodule:: haystack.forms\n\n``SearchForm``\n--------------\n\nThe most basic of the form types, this form consists of a single field, the\n``q`` field (for query). Upon searching, the form will take the cleaned contents\nof the ``q`` field and perform an ``auto_query`` on either the custom\n``SearchQuerySet`` you provide or off a default ``SearchQuerySet``.\n\nTo customize the ``SearchQuerySet`` the form will use, pass it a\n``searchqueryset`` parameter to the constructor with the ``SearchQuerySet``\nyou'd like to use. 
If using this form in conjunction with a ``SearchView``,\nthe form will receive whatever ``SearchQuerySet`` you provide to the view with\nno additional work needed.\n\nThe ``SearchForm`` also accepts a ``load_all`` parameter (``True`` or\n``False``), which determines how the database is queried when iterating through\nthe results. This also is received automatically from the ``SearchView``.\n\nAll other forms in Haystack inherit (either directly or indirectly) from this\nform.\n\n``HighlightedSearchForm``\n-------------------------\n\nIdentical to the ``SearchForm`` except that it tags the ``highlight`` method on\nto the end of the ``SearchQuerySet`` to enable highlighted results.\n\n``ModelSearchForm``\n-------------------\n\nThis form adds new fields to the form. It iterates through all registered models for\nthe current ``SearchSite`` and provides a checkbox for each one. If no models\nare selected, all types will show up in the results.\n\n``HighlightedModelSearchForm``\n------------------------------\n\nIdentical to the ``ModelSearchForm`` except that it tags the ``highlight``\nmethod on to the end of the ``SearchQuerySet`` to enable highlighted results on\nthe selected models.\n\n``FacetedSearchForm``\n---------------------\n\nIdentical to the ``SearchForm`` except that it adds a hidden ``selected_facets``\nfield onto the form, allowing the form to narrow the results based on the facets\nchosen by the user.\n\nCreating Your Own Form\n----------------------\n\nThe simplest way to go about creating your own form is to inherit from\n``SearchForm`` (or the desired parent) and extend the ``search`` method. By\ndoing this, you save yourself most of the work of handling data correctly and\nstay API compatible with the ``SearchView``.\n\nFor example, let's say you're providing search with a user-selectable date range\nassociated with it. 
You might create a form that looked as follows::\n\n    from django import forms\n    from haystack.forms import SearchForm\n\n\n    class DateRangeSearchForm(SearchForm):\n        start_date = forms.DateField(required=False)\n        end_date = forms.DateField(required=False)\n\n        def search(self):\n            # First, store the SearchQuerySet received from other processing.\n            sqs = super(DateRangeSearchForm, self).search()\n\n            if not self.is_valid():\n                return self.no_query_found()\n\n            # Check to see if a start_date was chosen.\n            if self.cleaned_data['start_date']:\n                sqs = sqs.filter(pub_date__gte=self.cleaned_data['start_date'])\n\n            # Check to see if an end_date was chosen.\n            if self.cleaned_data['end_date']:\n                sqs = sqs.filter(pub_date__lte=self.cleaned_data['end_date'])\n\n            return sqs\n\nThis form adds two new fields for (optionally) choosing the start and end dates.\nWithin the ``search`` method, we grab the results from the parent form's\nprocessing. Then, if a user has selected a start and/or end date, we apply that\nfiltering. Finally, we simply return the ``SearchQuerySet``.\n\nViews\n=====\n\n.. currentmodule:: haystack.views\n\n.. note::\n\n    As of version 2.4 the views in ``haystack.views.SearchView`` are deprecated in\n    favor of the new generic views in ``haystack.generic_views.SearchView``\n    which use the standard Django `class-based views`_ which are available in\n    every version of Django which is supported by Haystack.\n\n.. _class-based views: https://docs.djangoproject.com/en/1.7/topics/class-based-views/\n\nNew Django Class Based Views\n----------------------------\n\n .. 
versionadded:: 2.4.0\n\nThe views in ``haystack.generic_views.SearchView`` inherit from Django’s standard\n`FormView <https://docs.djangoproject.com/en/1.7/ref/class-based-views/generic-editing/#formview>`_.\nThe example views can be customized like any other Django class-based view as\ndemonstrated in this example which filters the search results in ``get_queryset``::\n\n    # views.py\n    from datetime import date\n\n    from haystack.generic_views import SearchView\n\n    class MySearchView(SearchView):\n        \"\"\"My custom search view.\"\"\"\n\n        def get_queryset(self):\n            queryset = super(MySearchView, self).get_queryset()\n            # further filter queryset based on some set of criteria\n            return queryset.filter(pub_date__gte=date(2015, 1, 1))\n\n        def get_context_data(self, *args, **kwargs):\n            context = super(MySearchView, self).get_context_data(*args, **kwargs)\n            # do something\n            return context\n\n    # urls.py\n\n    urlpatterns = [\n        url(r'^/search/?$', MySearchView.as_view(), name='search_view'),\n    ]\n\n\nUpgrading\n~~~~~~~~~\n\nUpgrading from basic usage of the old-style views to new-style views is usually as simple as:\n\n#. Create new views under ``views.py`` subclassing ``haystack.generic_views.SearchView``\n   or ``haystack.generic_views.FacetedSearchView``\n#. Move all parameters of your old-style views from your ``urls.py`` to attributes on\n   your new views. This will require renaming ``searchqueryset`` to ``queryset`` and\n   ``template`` to ``template_name``\n#. 
Review your templates and replace the ``page`` variable with ``page_obj``\n\nHere's an example::\n\n    ### old-style views...\n    # urls.py\n\n    sqs = SearchQuerySet().filter(author='john')\n\n    urlpatterns = [\n        url(r'^$', SearchView(\n            template='my/special/path/john_search.html',\n            searchqueryset=sqs,\n            form_class=SearchForm\n        ), name='haystack_search'),\n    ]\n\n    ### new-style views...\n    # views.py\n\n    class JohnSearchView(SearchView):\n        template_name = 'my/special/path/john_search.html'\n        queryset = SearchQuerySet().filter(author='john')\n        form_class = SearchForm\n\n    # urls.py\n    from myapp.views import JohnSearchView\n\n    urlpatterns = [\n        url(r'^$', JohnSearchView.as_view(), name='haystack_search'),\n    ]\n\n\nIf your views overrode methods on the old-style SearchView, you will need to\nrefactor those methods to the equivalents on Django's generic views. For example,\nif you previously used ``extra_context()`` to add additional template variables or\npreprocess the values returned by Haystack, that code would move to ``get_context_data``\n\n+-----------------------+-------------------------------------------+\n| Old Method            | New Method                                |\n+=======================+===========================================+\n| ``extra_context()``   | `get_context_data()`_                     |\n+-----------------------+-------------------------------------------+\n| ``create_response()`` | `dispatch()`_ or ``get()`` and ``post()`` |\n+-----------------------+-------------------------------------------+\n| ``get_query()``       | `get_queryset()`_                         |\n+-----------------------+-------------------------------------------+\n\n.. _get_context_data(): https://docs.djangoproject.com/en/1.7/ref/class-based-views/mixins-simple/#django.views.generic.base.ContextMixin.get_context_data\n.. 
_dispatch(): https://docs.djangoproject.com/en/1.7/ref/class-based-views/base/#django.views.generic.base.View.dispatch\n.. _get_queryset(): https://docs.djangoproject.com/en/1.7/ref/class-based-views/mixins-multiple-object/#django.views.generic.list.MultipleObjectMixin.get_queryset\n\n\nOld-Style Views\n---------------\n\n .. deprecated:: 2.4.0\n\nHaystack comes bundled with three views, the class-based views (``SearchView`` &\n``FacetedSearchView``) and a traditional functional view (``basic_search``).\n\nThe class-based views provide for easy extension should you need to alter the\nway a view works. Except in the case of faceting (again, see :doc:`faceting`),\nthe ``SearchView`` works interchangeably with all other forms provided by\nHaystack.\n\nThe functional view provides an example of how Haystack can be used in more\ntraditional settings or as an example of how to write a more complex custom\nview. It is also thread-safe.\n\n``SearchView(template=None, load_all=True, form_class=None, searchqueryset=None, results_per_page=None)``\n---------------------------------------------------------------------------------------------------------------------------------------\n\nThe ``SearchView`` is designed to be easy/flexible enough to override common\nchanges as well as being internally abstracted so that only altering a specific\nportion of the code should be easy to do.\n\nWithout touching any of the internals of the ``SearchView``, you can modify\nwhich template is used, which form class should be instantiated to search with,\nwhat ``SearchQuerySet`` to use in the event you wish to pre-filter the results,\nwhat ``Context``-style object to use in the response and the ``load_all``\nperformance optimization to reduce hits on the database. These options can (and\ngenerally should) be overridden at the URLconf level. 
For example, to have a\ncustom search limited to the 'John' author, displaying all models to search by\nand specifying a custom template (``my/special/path/john_search.html``), your\nURLconf should look something like::\n\n    from django.conf.urls import url\n    from haystack.forms import ModelSearchForm\n    from haystack.query import SearchQuerySet\n    from haystack.views import SearchView\n\n    sqs = SearchQuerySet().filter(author='john')\n\n    # Without threading...\n    urlpatterns = [\n        url(r'^$', SearchView(\n            template='my/special/path/john_search.html',\n            searchqueryset=sqs,\n            form_class=SearchForm\n        ), name='haystack_search'),\n    ]\n\n    # With threading...\n    from haystack.views import SearchView, search_view_factory\n\n    urlpatterns = [\n        url(r'^$', search_view_factory(\n            view_class=SearchView,\n            template='my/special/path/john_search.html',\n            searchqueryset=sqs,\n            form_class=ModelSearchForm\n        ), name='haystack_search'),\n    ]\n\n.. warning::\n\n    The standard ``SearchView`` is not thread-safe. Use the\n    ``search_view_factory`` function, which returns thread-safe instances of\n    ``SearchView``.\n\nBy default, if you don't specify a ``form_class``, the view will use the\n``haystack.forms.ModelSearchForm`` form.\n\nBeyond these customizations, you can create your own ``SearchView`` and\nextend/override the following methods to change the functionality.\n\n``__call__(self, request)``\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nGenerates the actual response to the search.\n\nRelies on internal, overridable methods to construct the response. 
You generally\nshould avoid altering this method unless you need to change the flow of the\nmethods or to add a new method into the processing.\n\n``build_form(self, form_kwargs=None)``\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nInstantiates the form the class should use to process the search query.\n\nOptionally accepts a dictionary of parameters that are passed on to the\nform's ``__init__``. You can use this to lightly customize the form.\n\nYou should override this if you write a custom form that needs special\nparameters for instantiation.\n\n``get_query(self)``\n~~~~~~~~~~~~~~~~~~~\n\nReturns the query provided by the user.\n\nReturns an empty string if the query is invalid. This pulls the cleaned query\nfrom the form, via the ``q`` field, for use elsewhere within the ``SearchView``.\nThis is used to populate the ``query`` context variable.\n\n``get_results(self)``\n~~~~~~~~~~~~~~~~~~~~~\n\nFetches the results via the form.\n\nReturns an empty list if there's no query to search with. This method relies on\nthe form to do the heavy lifting as much as possible.\n\n``build_page(self)``\n~~~~~~~~~~~~~~~~~~~~\n\nPaginates the results appropriately.\n\nIn case someone does not want to use Django's built-in pagination, it\nshould be a simple matter to override this method to do what they would\nlike.\n\n``extra_context(self)``\n~~~~~~~~~~~~~~~~~~~~~~~\n\nAllows the addition of more context variables as needed. Must return a\ndictionary whose contents will add to or overwrite the other variables in the\ncontext.\n\n``create_response(self)``\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nGenerates the actual HttpResponse to send back to the user. 
It builds the page,\ncreates the context and renders the response for all the aforementioned\nprocessing.\n\n\n``basic_search(request, template='search/search.html', load_all=True, form_class=ModelSearchForm, searchqueryset=None, extra_context=None, results_per_page=None)``\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nThe ``basic_search`` tries to provide most of the same functionality as the\nclass-based views but resembles a more traditional generic view. It's both a\nworking view if you prefer not to use the class-based views as well as a good\nstarting point for writing highly custom views.\n\nSince it is all one function, the only means of extension are passing in\nkwargs, similar to the way generic views work.\n\n\nCreating Your Own View\n----------------------\n\nAs with the forms, inheritance is likely your best bet. In this case, the\n``FacetedSearchView`` is a perfect example of how to extend the existing\n``SearchView``. The complete code for the ``FacetedSearchView`` looks like::\n\n    class FacetedSearchView(SearchView):\n        def extra_context(self):\n            extra = super(FacetedSearchView, self).extra_context()\n\n            if self.results == []:\n                extra['facets'] = self.form.search().facet_counts()\n            else:\n                extra['facets'] = self.results.facet_counts()\n\n            return extra\n\nIt updates the name of the class (generally for documentation purposes) and\nadds the facets from the ``SearchQuerySet`` to the context as the ``facets``\nvariable. As with the custom form example above, it relies on the parent class\nto handle most of the processing and extends that only where needed.\n"
  },
  {
    "path": "docs/who_uses.rst",
    "content": ".. _ref-who-uses:\n\nSites Using Haystack\n====================\n\nThe following sites are a partial list of people using Haystack. I'm always\ninterested in adding more sites, so please find me (``daniellindsley``) via\nIRC or the mailing list thread.\n\n\nLJWorld/Lawrence.com/KUSports\n-----------------------------\n\nFor all things search-related.\n\nUsing: Solr\n\n* http://www2.ljworld.com/search/\n* http://www2.ljworld.com/search/vertical/news.story/\n* http://www2.ljworld.com/marketplace/\n* http://www.lawrence.com/search/\n* http://www.kusports.com/search/\n\n\nAltWeeklies\n-----------\n\nProviding an API to story aggregation.\n\nUsing: Whoosh\n\n* http://www.northcoastjournal.com/altweeklies/documentation/\n\n\nTeachoo\n-----------\n\nTeachoo uses Haystack for its site search.\n\nUsing: Elasticsearch\n\n* https://www.teachoo.com/\n\n\nTrapeze\n-------\n\nVarious projects.\n\nUsing: Xapian\n\n* http://www.trapeze.com/\n* http://www.windmobile.ca/\n* http://www.bonefishgrill.com/\n* http://www.canadiantire.ca/ (Portions of)\n\n\nVickerey.com\n------------\n\nFor (really well done) search & faceting.\n\nUsing: Solr\n\n* http://store.vickerey.com/products/search/\n\n\nEldarion\n--------\n\nVarious projects.\n\nUsing: Solr\n\n* http://eldarion.com/\n\n\nSunlight Labs\n-------------\n\nFor general search.\n\nUsing: Whoosh & Solr\n\n* http://sunlightlabs.com/\n* http://subsidyscope.com/\n\n\nNASA\n----\n\nFor general search.\n\nUsing: Solr\n\n* An internal site called SMD Spacebook 1.1.\n* http://science.nasa.gov/\n\n\nAllForLocal\n-----------\n\nFor general search.\n\n* http://www.allforlocal.com/\n\n\nHUGE\n----\n\nVarious projects.\n\nUsing: Solr\n\n* http://hugeinc.com/\n* http://houselogic.com/\n\n\nBrick Design\n------------\n\nFor search on Explore.\n\nUsing: Solr\n\n* http://bricksf.com/\n* http://explore.org/\n\n\nWinding Road\n------------\n\nFor general search.\n\nUsing: Solr\n\n* http://www.windingroad.com/\n\n\nReddit\n------\n\nFor 
Reddit Gifts.\n\nUsing: Whoosh\n\n* http://redditgifts.com/\n\n\nPegasus News\n------------\n\nFor general search.\n\nUsing: Xapian\n\n* http://www.pegasusnews.com/\n\n\nRampframe\n---------\n\nFor general search.\n\nUsing: Xapian\n\n* http://www.rampframe.com/\n\n\nForkinit\n--------\n\nFor general search, model-specific search and suggestions via MLT.\n\nUsing: Solr\n\n* http://forkinit.com/\n\n\nStructured Abstraction\n----------------------\n\nFor general search.\n\nUsing: Xapian\n\n* http://www.structuredabstraction.com/\n* http://www.delivergood.org/\n\n\nCustomMade\n----------\n\nFor general search.\n\nUsing: Solr\n\n* http://www.custommade.com/\n\n\nUniversity of the Andes, Dept. of Political Science\n---------------------------------------------------\n\nFor general search & section-specific search. Developed by Monoku.\n\nUsing: Solr\n\n* http://www.congresovisible.org/\n* http://www.monoku.com/\n\n\nChristchurch Art Gallery\n------------------------\n\nFor general search & section-specific search.\n\nUsing: Solr\n\n* http://christchurchartgallery.org.nz/search/\n* http://christchurchartgallery.org.nz/collection/browse/\n\n\nDevCheatSheet.com\n-----------------\n\nFor general search.\n\nUsing: Xapian\n\n* http://devcheatsheet.com/\n\n\nTodasLasRecetas\n---------------\n\nFor search, faceting & More Like This.\n\nUsing: Solr\n\n* http://www.todaslasrecetas.es/receta/s/?q=langostinos\n* http://www.todaslasrecetas.es/receta/9526/brochetas-de-langostinos\n\n\nAstroBin\n--------\n\nFor general search.\n\nUsing: Solr\n\n* http://www.astrobin.com/\n\n\nEuropean Paper Company\n----------------------\n\nFor general search.\n\nUsing: ???\n\n* http://europeanpaper.com/\n\n\nmtn-op\n------\n\nFor general search.\n\nUsing: ???\n\n* http://mountain-op.com/\n\n\nCrate\n-----\n\nCrate is a PyPI mirror/replacement. 
It's using Haystack to power all search &\nfaceted navigation on the site.\n\nUsing: Elasticsearch\n\n* https://crate.io/\n\n\nPix Populi\n----------\n\nPix Populi is a popular French photo sharing site.\n\nUsing: Solr\n\n* http://www.pix-populi.fr/\n\n\nLocalWiki\n----------\n\nLocalWiki is a tool for collaborating in local, geographic communities.\nIt's using Haystack to power search on every LocalWiki instance.\n\nUsing: Solr\n\n* http://localwiki.org/\n\n\nPitchup\n-------\n\nFor faceting, geo and autocomplete.\n\nUsing: ???\n\n* http://www.pitchup.com/search/\n\n\nGidsy\n-----\n\nGidsy makes it easy for anyone to organize and find exciting things\nto do everywhere in the world.\n\nFor activity search, area pages, forums and private messages.\n\nUsing: Elasticsearch\n\n* https://gidsy.com/\n* https://gidsy.com/search/\n* https://gidsy.com/forum/\n\n\nGroundCity\n----------\n\nGroundcity is a Romanian dynamic real estate site.\n\nFor real estate, forums and comments.\n\nUsing: Whoosh\n\n* http://groundcity.ro/cautare/\n\n\nDocket Alarm\n------------\n\nDocket Alarm allows people to search court dockets across\nthe country. With it, you can search court dockets in the International Trade\nCommission (ITC), the Patent Trial and Appeal Board (PTAB) and All Federal\nCourts.\n\nUsing: Elasticsearch\n\n* https://www.docketalarm.com/search/ITC\n* https://www.docketalarm.com/search/PTAB\n* https://www.docketalarm.com/search/dockets\n\n\nEducreations\n-------------\n\nEducreations makes it easy for anyone to teach what they know and learn\nwhat they don't with a recordable whiteboard. Haystack is used to\nprovide search across users and lessons.\n\nUsing: Solr\n\n* http://www.educreations.com/browse/\n"
  },
  {
    "path": "example_project/__init__.py",
    "content": ""
  },
  {
    "path": "example_project/bare_bones_app/__init__.py",
    "content": ""
  },
  {
    "path": "example_project/bare_bones_app/models.py",
    "content": "import datetime\n\nfrom django.db import models\n\n\nclass Cat(models.Model):\n    name = models.CharField(max_length=255)\n    birth_date = models.DateField(default=datetime.date.today)\n    bio = models.TextField(blank=True)\n    created = models.DateTimeField(default=datetime.datetime.now)\n    updated = models.DateTimeField(default=datetime.datetime.now)\n\n    def __str__(self):\n        return self.name\n\n    @models.permalink\n    def get_absolute_url(self):\n        return (\"cat_detail\", [], {\"id\": self.id})\n"
  },
  {
    "path": "example_project/bare_bones_app/search_indexes.py",
    "content": "from bare_bones_app.models import Cat\n\nfrom haystack import indexes\n\n\n# For the most basic usage, you can use a subclass of\n# `haystack.indexes.BasicSearchIndex`, whose only requirement will be that\n# you create a `search/indexes/bare_bones_app/cat_text.txt` data template\n# for indexing.\nclass CatIndex(indexes.BasicSearchIndex, indexes.Indexable):\n    def get_model(self):\n        return Cat\n"
  },
  {
    "path": "example_project/regular_app/__init__.py",
    "content": ""
  },
  {
    "path": "example_project/regular_app/models.py",
    "content": "import datetime\n\nfrom django.db import models\n\nBREED_CHOICES = [\n    (\"collie\", \"Collie\"),\n    (\"labrador\", \"Labrador\"),\n    (\"pembroke\", \"Pembroke Corgi\"),\n    (\"shetland\", \"Shetland Sheepdog\"),\n    (\"border\", \"Border Collie\"),\n]\n\n\nclass Dog(models.Model):\n    breed = models.CharField(max_length=255, choices=BREED_CHOICES)\n    name = models.CharField(max_length=255)\n    owner_last_name = models.CharField(max_length=255, blank=True)\n    birth_date = models.DateField(default=datetime.date.today)\n    bio = models.TextField(blank=True)\n    public = models.BooleanField(default=True)\n    created = models.DateTimeField(default=datetime.datetime.now)\n    updated = models.DateTimeField(default=datetime.datetime.now)\n\n    def __str__(self):\n        return self.full_name()\n\n    @models.permalink\n    def get_absolute_url(self):\n        return (\"dog_detail\", [], {\"id\": self.id})\n\n    def full_name(self):\n        if self.owner_last_name:\n            return \"%s %s\" % (self.name, self.owner_last_name)\n\n        return self.name\n\n\nclass Toy(models.Model):\n    dog = models.ForeignKey(Dog, related_name=\"toys\")\n    name = models.CharField(max_length=60)\n\n    def __str__(self):\n        return \"%s's %s\" % (self.dog.name, self.name)\n"
  },
  {
    "path": "example_project/regular_app/search_indexes.py",
    "content": "from regular_app.models import Dog\n\nfrom haystack import indexes\n\n\n# More typical usage involves creating a subclassed `SearchIndex`. This will\n# provide more control over how data is indexed, generally resulting in better\n# search.\nclass DogIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    # We can pull data straight out of the model via `model_attr`.\n    breed = indexes.CharField(model_attr=\"breed\")\n    # Note that callables are also OK to use.\n    name = indexes.CharField(model_attr=\"full_name\")\n    bio = indexes.CharField(model_attr=\"name\")\n    birth_date = indexes.DateField(model_attr=\"birth_date\")\n    # Note that we can't assign an attribute here. We'll manually prepare it instead.\n    toys = indexes.MultiValueField()\n\n    def get_model(self):\n        return Dog\n\n    def index_queryset(self, using=None):\n        return self.get_model().objects.filter(public=True)\n\n    def prepare_toys(self, obj):\n        # Store a list of id's for filtering\n        return [toy.id for toy in obj.toys.all()]\n\n        # Alternatively, you could store the names if searching for toy names\n        # is more useful.\n        # return [toy.name for toy in obj.toys.all()]\n"
  },
  {
    "path": "example_project/settings.py",
    "content": "import os\n\nfrom django.conf import settings\n\nSECRET_KEY = \"CHANGE ME\"\n\n# All the normal settings apply. What's included here are the bits you'll have\n# to customize.\n\n# Add Haystack to INSTALLED_APPS. You can do this by simply placing in your list.\nINSTALLED_APPS = settings.INSTALLED_APPS + (\"haystack\",)\n\n\nHAYSTACK_CONNECTIONS = {\n    \"default\": {\n        # For Solr:\n        \"ENGINE\": \"haystack.backends.solr_backend.SolrEngine\",\n        \"URL\": \"http://localhost:9001/solr/example\",\n        \"TIMEOUT\": 60 * 5,\n        \"INCLUDE_SPELLING\": True,\n    },\n    \"elasticsearch\": {\n        \"ENGINE\": \"haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine\",\n        \"URL\": \"http://localhost:9200\",\n        \"INDEX_NAME\": \"example_project\",\n    },\n    \"whoosh\": {\n        # For Whoosh:\n        \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n        \"PATH\": os.path.join(os.path.dirname(__file__), \"whoosh_index\"),\n        \"INCLUDE_SPELLING\": True,\n    },\n    \"simple\": {\n        # For Simple:\n        \"ENGINE\": \"haystack.backends.simple_backend.SimpleEngine\"\n    },\n    # 'xapian': {\n    #     # For Xapian (requires the third-party install):\n    #     'ENGINE': 'xapian_backend.XapianEngine',\n    #     'PATH': os.path.join(os.path.dirname(__file__), 'xapian_index'),\n    # }\n}\n"
  },
  {
    "path": "example_project/templates/search/indexes/bare_bones_app/cat_text.txt",
    "content": "{{ object.name }}\n{{ object.bio }}"
  },
  {
    "path": "example_project/templates/search/indexes/regular_app/dog_text.txt",
    "content": "{{ object.full_name }}\n{{ object.breed }}\n{{ object.bio }}\n\n{% for toy in object.toys.all %}\n    {{ toy.name }}\n{% endfor %}"
  },
  {
    "path": "haystack/__init__.py",
    "content": "from django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom pkg_resources import DistributionNotFound, get_distribution, parse_version\n\nfrom haystack.constants import DEFAULT_ALIAS\nfrom haystack.utils import loading\n\n__author__ = \"Daniel Lindsley\"\n\ntry:\n    pkg_distribution = get_distribution(\"django-haystack\")\n    __version__ = pkg_distribution.version\n    version_info = pkg_distribution.parsed_version\nexcept DistributionNotFound:\n    __version__ = \"0.0.dev0\"\n    version_info = parse_version(__version__)\n\ndefault_app_config = \"haystack.apps.HaystackConfig\"\n\n\n# Help people clean up from 1.X.\nif hasattr(settings, \"HAYSTACK_SITECONF\"):\n    raise ImproperlyConfigured(\n        \"The HAYSTACK_SITECONF setting is no longer used & can be removed.\"\n    )\nif hasattr(settings, \"HAYSTACK_SEARCH_ENGINE\"):\n    raise ImproperlyConfigured(\n        \"The HAYSTACK_SEARCH_ENGINE setting has been replaced with HAYSTACK_CONNECTIONS.\"\n    )\nif hasattr(settings, \"HAYSTACK_ENABLE_REGISTRATIONS\"):\n    raise ImproperlyConfigured(\n        \"The HAYSTACK_ENABLE_REGISTRATIONS setting is no longer used & can be removed.\"\n    )\nif hasattr(settings, \"HAYSTACK_INCLUDE_SPELLING\"):\n    raise ImproperlyConfigured(\n        \"The HAYSTACK_INCLUDE_SPELLING setting is now a per-backend setting\"\n        \" & belongs in HAYSTACK_CONNECTIONS.\"\n    )\n\n\n# Check the 2.X+ bits.\nif not hasattr(settings, \"HAYSTACK_CONNECTIONS\"):\n    raise ImproperlyConfigured(\"The HAYSTACK_CONNECTIONS setting is required.\")\nif DEFAULT_ALIAS not in settings.HAYSTACK_CONNECTIONS:\n    raise ImproperlyConfigured(\n        \"The default alias '%s' must be included in the HAYSTACK_CONNECTIONS setting.\"\n        % DEFAULT_ALIAS\n    )\n\n# Load the connections.\nconnections = loading.ConnectionHandler(settings.HAYSTACK_CONNECTIONS)\n\n# Just check HAYSTACK_ROUTERS setting validity, routers will be loaded lazily\nif 
hasattr(settings, \"HAYSTACK_ROUTERS\"):\n    if not isinstance(settings.HAYSTACK_ROUTERS, (list, tuple)):\n        raise ImproperlyConfigured(\n            \"The HAYSTACK_ROUTERS setting must be either a list or tuple.\"\n        )\n\n# Load the router(s).\nconnection_router = loading.ConnectionRouter()\n\n\n# Per-request, reset the ghetto query log.\n# Probably not extraordinarily thread-safe but should only matter when\n# DEBUG = True.\ndef reset_search_queries(**kwargs):\n    for conn in connections.all():\n        if conn:\n            conn.reset_queries()\n\n\nif settings.DEBUG:\n    from django.core import signals as django_signals\n\n    django_signals.request_started.connect(reset_search_queries)\n"
  },
  {
    "path": "haystack/admin.py",
    "content": "from django.contrib.admin.options import ModelAdmin, csrf_protect_m\nfrom django.contrib.admin.views.main import SEARCH_VAR, ChangeList\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.paginator import InvalidPage, Paginator\nfrom django.shortcuts import render\nfrom django.utils.encoding import force_str\nfrom django.utils.translation import ungettext\n\nfrom haystack import connections\nfrom haystack.constants import DEFAULT_ALIAS\nfrom haystack.query import SearchQuerySet\nfrom haystack.utils import get_model_ct_tuple\n\n\nclass SearchChangeList(ChangeList):\n    def __init__(self, **kwargs):\n        self.haystack_connection = kwargs.pop(\"haystack_connection\", DEFAULT_ALIAS)\n        super().__init__(**kwargs)\n\n    def get_results(self, request):\n        if SEARCH_VAR not in request.GET:\n            return super().get_results(request)\n\n        # Note that pagination is 0-based, not 1-based.\n        sqs = (\n            SearchQuerySet(self.haystack_connection)\n            .models(self.model)\n            .auto_query(request.GET[SEARCH_VAR])\n            .load_all()\n        )\n\n        paginator = Paginator(sqs, self.list_per_page)\n        # Get the number of objects, with admin filters applied.\n        result_count = paginator.count\n        full_result_count = (\n            SearchQuerySet(self.haystack_connection).models(self.model).all().count()\n        )\n\n        can_show_all = result_count <= self.list_max_show_all\n        multi_page = result_count > self.list_per_page\n\n        # Get the list of objects to display on this page.\n        try:\n            result_list = paginator.page(self.page_num + 1).object_list\n            # Grab just the Django models, since that's what everything else is\n            # expecting.\n            result_list = [result.object for result in result_list]\n        except InvalidPage:\n            result_list = ()\n\n        self.result_count = result_count\n        
self.full_result_count = full_result_count\n        self.result_list = result_list\n        self.can_show_all = can_show_all\n        self.multi_page = multi_page\n        self.paginator = paginator\n\n\nclass SearchModelAdminMixin(object):\n    # haystack connection to use for searching\n    haystack_connection = DEFAULT_ALIAS\n\n    @csrf_protect_m\n    def changelist_view(self, request, extra_context=None):\n        if not self.has_change_permission(request, None):\n            raise PermissionDenied\n\n        if SEARCH_VAR not in request.GET:\n            # Do the usual song and dance.\n            return super().changelist_view(request, extra_context)\n\n        # Do a search of just this model and populate a Changelist with the\n        # returned bits.\n        indexed_models = (\n            connections[self.haystack_connection]\n            .get_unified_index()\n            .get_indexed_models()\n        )\n\n        if self.model not in indexed_models:\n            # Oops. That model isn't being indexed. Return the usual\n            # behavior instead.\n            return super().changelist_view(request, extra_context)\n\n        # So. Much. 
Boilerplate.\n        # Why copy-paste a few lines when you can copy-paste TONS of lines?\n        list_display = list(self.list_display)\n\n        kwargs = {\n            \"haystack_connection\": self.haystack_connection,\n            \"request\": request,\n            \"model\": self.model,\n            \"list_display\": list_display,\n            \"list_display_links\": self.list_display_links,\n            \"list_filter\": self.list_filter,\n            \"date_hierarchy\": self.date_hierarchy,\n            \"search_fields\": self.search_fields,\n            \"list_select_related\": self.list_select_related,\n            \"list_per_page\": self.list_per_page,\n            \"list_editable\": self.list_editable,\n            \"list_max_show_all\": self.list_max_show_all,\n            \"model_admin\": self,\n        }\n        if hasattr(self, \"get_sortable_by\"):  # Django 2.1+\n            kwargs[\"sortable_by\"] = self.get_sortable_by(request)\n        changelist = SearchChangeList(**kwargs)\n        changelist.formset = None\n        media = self.media\n\n        # Build the action form and populate it with available actions.\n        # Check actions to see if any are available on this changelist\n        actions = self.get_actions(request)\n        if actions:\n            action_form = self.action_form(auto_id=None)\n            action_form.fields[\"action\"].choices = self.get_action_choices(request)\n        else:\n            action_form = None\n\n        selection_note = ungettext(\n            \"0 of %(count)d selected\",\n            \"of %(count)d selected\",\n            len(changelist.result_list),\n        )\n        selection_note_all = ungettext(\n            \"%(total_count)s selected\",\n            \"All %(total_count)s selected\",\n            changelist.result_count,\n        )\n\n        context = {\n            \"module_name\": force_str(self.model._meta.verbose_name_plural),\n            \"selection_note\": selection_note % {\"count\": 
len(changelist.result_list)},\n            \"selection_note_all\": selection_note_all\n            % {\"total_count\": changelist.result_count},\n            \"title\": changelist.title,\n            \"is_popup\": changelist.is_popup,\n            \"cl\": changelist,\n            \"media\": media,\n            \"has_add_permission\": self.has_add_permission(request),\n            \"opts\": changelist.opts,\n            \"app_label\": self.model._meta.app_label,\n            \"action_form\": action_form,\n            \"actions_on_top\": self.actions_on_top,\n            \"actions_on_bottom\": self.actions_on_bottom,\n            \"actions_selection_counter\": getattr(self, \"actions_selection_counter\", 0),\n        }\n        context.update(extra_context or {})\n        request.current_app = self.admin_site.name\n        app_name, model_name = get_model_ct_tuple(self.model)\n        return render(\n            request,\n            self.change_list_template\n            or [\n                \"admin/%s/%s/change_list.html\" % (app_name, model_name),\n                \"admin/%s/change_list.html\" % app_name,\n                \"admin/change_list.html\",\n            ],\n            context,\n        )\n\n\nclass SearchModelAdmin(SearchModelAdminMixin, ModelAdmin):\n    pass\n"
  },
  {
    "path": "haystack/apps.py",
    "content": "import logging\n\nfrom django.apps import AppConfig\nfrom django.conf import settings\n\nfrom haystack import connection_router, connections\nfrom haystack.utils import loading\n\n\nclass HaystackConfig(AppConfig):\n    name = \"haystack\"\n    signal_processor = None\n    stream = None\n\n    def ready(self):\n        # Setup default logging.\n        log = logging.getLogger(\"haystack\")\n        self.stream = logging.StreamHandler()\n        self.stream.setLevel(logging.INFO)\n        log.addHandler(self.stream)\n\n        # Setup the signal processor.\n        if not self.signal_processor:\n            signal_processor_path = getattr(\n                settings,\n                \"HAYSTACK_SIGNAL_PROCESSOR\",\n                \"haystack.signals.BaseSignalProcessor\",\n            )\n            signal_processor_class = loading.import_class(signal_processor_path)\n            self.signal_processor = signal_processor_class(\n                connections, connection_router\n            )\n"
  },
  {
    "path": "haystack/backends/__init__.py",
    "content": "import copy\nfrom copy import deepcopy\nfrom time import time\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.db.models.base import ModelBase\nfrom django.utils import tree\nfrom django.utils.encoding import force_str\n\nfrom haystack.constants import DEFAULT_ALIAS, FILTER_SEPARATOR, VALID_FILTERS\nfrom haystack.exceptions import FacetingError, MoreLikeThisError\nfrom haystack.models import SearchResult\nfrom haystack.utils import get_model_ct\nfrom haystack.utils.loading import UnifiedIndex\n\nVALID_GAPS = [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\"]\n\nSPELLING_SUGGESTION_HAS_NOT_RUN = object()\n\n\ndef log_query(func):\n    \"\"\"\n    A decorator for pseudo-logging search queries. Used in the ``SearchBackend``\n    to wrap the ``search`` method.\n    \"\"\"\n\n    def wrapper(obj, query_string, *args, **kwargs):\n        start = time()\n\n        try:\n            return func(obj, query_string, *args, **kwargs)\n        finally:\n            stop = time()\n\n            if settings.DEBUG:\n                from haystack import connections\n\n                connections[obj.connection_alias].queries.append(\n                    {\n                        \"query_string\": query_string,\n                        \"additional_args\": args,\n                        \"additional_kwargs\": kwargs,\n                        \"time\": \"%.3f\" % (stop - start),\n                        \"start\": start,\n                        \"stop\": stop,\n                    }\n                )\n\n    return wrapper\n\n\nclass EmptyResults(object):\n    hits = 0\n    docs = []\n\n    def __len__(self):\n        return 0\n\n    def __getitem__(self, k):\n        if isinstance(k, slice):\n            return []\n        else:\n            raise IndexError(\"It's not here.\")\n\n\nclass BaseSearchBackend(object):\n    \"\"\"\n    Abstract search engine base class.\n    \"\"\"\n\n    # Backends should include their own 
reserved words/characters.\n    RESERVED_WORDS = []\n    RESERVED_CHARACTERS = []\n\n    def __init__(self, connection_alias, **connection_options):\n        self.connection_alias = connection_alias\n        self.timeout = connection_options.get(\"TIMEOUT\", 10)\n        self.include_spelling = connection_options.get(\"INCLUDE_SPELLING\", False)\n        self.batch_size = connection_options.get(\"BATCH_SIZE\", 1000)\n        self.silently_fail = connection_options.get(\"SILENTLY_FAIL\", True)\n        self.distance_available = connection_options.get(\"DISTANCE_AVAILABLE\", False)\n\n    def update(self, index, iterable, commit=True):\n        \"\"\"\n        Updates the backend when given a SearchIndex and a collection of\n        documents.\n\n        This method MUST be implemented by each backend, as it will be highly\n        specific to each one.\n        \"\"\"\n        raise NotImplementedError\n\n    def remove(self, obj_or_string):\n        \"\"\"\n        Removes a document/object from the backend. Can be either a model\n        instance or the identifier (i.e. 
``app_name.model_name.id``) in the\n        event the object no longer exists.\n\n        This method MUST be implemented by each backend, as it will be highly\n        specific to each one.\n        \"\"\"\n        raise NotImplementedError\n\n    def clear(self, models=None, commit=True):\n        \"\"\"\n        Clears the backend of all documents/objects for a collection of models.\n\n        This method MUST be implemented by each backend, as it will be highly\n        specific to each one.\n        \"\"\"\n        raise NotImplementedError\n\n    @log_query\n    def search(self, query_string, **kwargs):\n        \"\"\"\n        Takes a query to search on and returns dictionary.\n\n        The query should be a string that is appropriate syntax for the backend.\n\n        The returned dictionary should contain the keys 'results' and 'hits'.\n        The 'results' value should be an iterable of populated SearchResult\n        objects. The 'hits' should be an integer count of the number of matched\n        results the search backend found.\n\n        This method MUST be implemented by each backend, as it will be highly\n        specific to each one.\n        \"\"\"\n        raise NotImplementedError\n\n    def build_search_kwargs(\n        self,\n        query_string,\n        sort_by=None,\n        start_offset=0,\n        end_offset=None,\n        fields=\"\",\n        highlight=False,\n        facets=None,\n        date_facets=None,\n        query_facets=None,\n        narrow_queries=None,\n        spelling_query=None,\n        within=None,\n        dwithin=None,\n        distance_point=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **extra_kwargs\n    ):\n        # A convenience method most backends should include in order to make\n        # extension easier.\n        raise NotImplementedError\n\n    def prep_value(self, value):\n        \"\"\"\n        Hook to give the backend a chance to prep an 
attribute value before\n        sending it to the search engine. By default, just force it to unicode.\n        \"\"\"\n        return force_str(value)\n\n    def more_like_this(\n        self, model_instance, additional_query_string=None, result_class=None\n    ):\n        \"\"\"\n        Takes a model object and returns results the backend thinks are similar.\n\n        This method MUST be implemented by each backend, as it will be highly\n        specific to each one.\n        \"\"\"\n        raise NotImplementedError(\n            \"Subclasses must provide a way to fetch similar record via the 'more_like_this' method if supported by the backend.\"\n        )\n\n    def extract_file_contents(self, file_obj):\n        \"\"\"\n        Hook to allow backends which support rich-content types such as PDF,\n        Word, etc. extraction to process the provided file object and return\n        the contents for indexing\n\n        Returns None if metadata cannot be extracted; otherwise returns a\n        dictionary containing at least two keys:\n\n            :contents:\n                        Extracted full-text content, if applicable\n            :metadata:\n                        key:value pairs of text strings\n        \"\"\"\n\n        raise NotImplementedError(\n            \"Subclasses must provide a way to extract metadata via the 'extract' method if supported by the backend.\"\n        )\n\n    def build_schema(self, fields):\n        \"\"\"\n        Takes a dictionary of fields and returns schema information.\n\n        This method MUST be implemented by each backend, as it will be highly\n        specific to each one.\n        \"\"\"\n        raise NotImplementedError(\n            \"Subclasses must provide a way to build their schema.\"\n        )\n\n    def build_models_list(self):\n        \"\"\"\n        Builds a list of models for searching.\n\n        The ``search`` method should use this and the ``django_ct`` field to\n        narrow the results 
(unless the user indicates not to). This helps ignore\n        any results that are not currently handled models and ensures\n        consistent caching.\n        \"\"\"\n        from haystack import connections\n\n        models = []\n\n        for model in (\n            connections[self.connection_alias].get_unified_index().get_indexed_models()\n        ):\n            models.append(get_model_ct(model))\n\n        return models\n\n\n# Alias for easy loading within SearchQuery objects.\nSearchBackend = BaseSearchBackend\n\n\nclass SearchNode(tree.Node):\n    \"\"\"\n    Manages an individual condition within a query.\n\n    Most often, this will be a lookup to ensure that a certain word or phrase\n    appears in the documents being indexed. However, it also supports filtering\n    types (such as 'lt', 'gt', 'in' and others) for more complex lookups.\n\n    This object creates a tree, with children being a list of either more\n    ``SQ`` objects or the expressions/values themselves.\n    \"\"\"\n\n    AND = \"AND\"\n    OR = \"OR\"\n    default = AND\n\n    # Start compat. Django 1.6 changed how ``tree.Node`` works, so we're going\n    # to patch back in the original implementation until time to rewrite this\n    # presents itself.\n    # See https://github.com/django/django/commit/d3f00bd.\n\n    def __init__(self, children=None, connector=None, negated=False):\n        \"\"\"\n        Constructs a new Node. If no connector is given, the default will be\n        used.\n\n        Warning: You probably don't want to pass in the 'negated' parameter. It\n        is NOT the same as constructing a node and calling negate() on the\n        result.\n        \"\"\"\n        self.children = children and children[:] or []\n        self.connector = connector or self.default\n        self.subtree_parents = []\n        self.negated = negated\n\n    # We need this because of django.db.models.query_utils.Q. Q. 
__init__() is\n    # problematic, but it is a natural Node subclass in all other respects.\n    def _new_instance(cls, children=None, connector=None, negated=False):\n        \"\"\"\n        This is called to create a new instance of this class when we need new\n        Nodes (or subclasses) in the internal code in this class. Normally, it\n        just shadows __init__(). However, subclasses with an __init__ signature\n        that is not an extension of Node.__init__ might need to implement this\n        method to allow a Node to create a new instance of them (if they have\n        any extra setting up to do).\n        \"\"\"\n        obj = SearchNode(children, connector, negated)\n        obj.__class__ = cls\n        return obj\n\n    _new_instance = classmethod(_new_instance)\n\n    def __str__(self):\n        if self.negated:\n            return \"(NOT (%s: %s))\" % (\n                self.connector,\n                \", \".join([str(c) for c in self.children]),\n            )\n        return \"(%s: %s)\" % (self.connector, \", \".join([str(c) for c in self.children]))\n\n    def __deepcopy__(self, memodict):\n        \"\"\"\n        Utility method used by copy.deepcopy().\n        \"\"\"\n        obj = SearchNode(connector=self.connector, negated=self.negated)\n        obj.__class__ = self.__class__\n        obj.children = copy.deepcopy(self.children, memodict)\n        obj.subtree_parents = copy.deepcopy(self.subtree_parents, memodict)\n        return obj\n\n    def __len__(self):\n        \"\"\"\n        The size of a node if the number of children it has.\n        \"\"\"\n        return len(self.children)\n\n    def __bool__(self):\n        \"\"\"\n        For truth value testing.\n        \"\"\"\n        return bool(self.children)\n\n    def __contains__(self, other):\n        \"\"\"\n        Returns True is 'other' is a direct child of this instance.\n        \"\"\"\n        return other in self.children\n\n    def add(self, node, conn_type):\n        
\"\"\"\n        Adds a new node to the tree. If the conn_type is the same as the root's\n        current connector type, the node is added to the first level.\n        Otherwise, the whole tree is pushed down one level and a new root\n        connector is created, connecting the existing tree and the new node.\n        \"\"\"\n        if node in self.children and conn_type == self.connector:\n            return\n        if len(self.children) < 2:\n            self.connector = conn_type\n        if self.connector == conn_type:\n            if isinstance(node, SearchNode) and (\n                node.connector == conn_type or len(node) == 1\n            ):\n                self.children.extend(node.children)\n            else:\n                self.children.append(node)\n        else:\n            obj = self._new_instance(self.children, self.connector, self.negated)\n            self.connector = conn_type\n            self.children = [obj, node]\n\n    def negate(self):\n        \"\"\"\n        Negate the sense of the root connector. This reorganises the children\n        so that the current node has a single child: a negated node containing\n        all the previous children. This slightly odd construction makes adding\n        new children behave more intuitively.\n\n        Interpreting the meaning of this negate is up to client code. This\n        method is useful for implementing \"not\" arrangements.\n        \"\"\"\n        self.children = [\n            self._new_instance(self.children, self.connector, not self.negated)\n        ]\n        self.connector = self.default\n\n    def start_subtree(self, conn_type):\n        \"\"\"\n        Sets up internal state so that new nodes are added to a subtree of the\n        current node. 
The conn_type specifies how the sub-tree is joined to the\n        existing children.\n        \"\"\"\n        if len(self.children) == 1:\n            self.connector = conn_type\n        elif self.connector != conn_type:\n            self.children = [\n                self._new_instance(self.children, self.connector, self.negated)\n            ]\n            self.connector = conn_type\n            self.negated = False\n\n        self.subtree_parents.append(\n            self.__class__(self.children, self.connector, self.negated)\n        )\n        self.connector = self.default\n        self.negated = False\n        self.children = []\n\n    def end_subtree(self):\n        \"\"\"\n        Closes off the most recently unmatched start_subtree() call.\n\n        This puts the current state into a node of the parent tree and returns\n        the current instances state to be the parent.\n        \"\"\"\n        obj = self.subtree_parents.pop()\n        node = self.__class__(self.children, self.connector)\n        self.connector = obj.connector\n        self.negated = obj.negated\n        self.children = obj.children\n        self.children.append(node)\n\n    # End compat.\n\n    def __repr__(self):\n        return \"<SQ: %s %s>\" % (\n            self.connector,\n            self.as_query_string(self._repr_query_fragment_callback),\n        )\n\n    def _repr_query_fragment_callback(self, field, filter_type, value):\n        return \"%s%s%s=%s\" % (field, FILTER_SEPARATOR, filter_type, force_str(value))\n\n    def as_query_string(self, query_fragment_callback):\n        \"\"\"\n        Produces a portion of the search query from the current SQ and its\n        children.\n        \"\"\"\n        result = []\n\n        for child in self.children:\n            if hasattr(child, \"as_query_string\"):\n                result.append(child.as_query_string(query_fragment_callback))\n            else:\n                expression, value = child\n                field, 
filter_type = self.split_expression(expression)\n                result.append(query_fragment_callback(field, filter_type, value))\n\n        conn = \" %s \" % self.connector\n        query_string = conn.join(result)\n\n        if query_string:\n            if self.negated:\n                query_string = \"NOT (%s)\" % query_string\n            elif len(self.children) != 1:\n                query_string = \"(%s)\" % query_string\n\n        return query_string\n\n    def split_expression(self, expression):\n        \"\"\"Parses an expression and determines the field and filter type.\"\"\"\n        parts = expression.split(FILTER_SEPARATOR)\n        field = parts[0]\n        if len(parts) == 1 or parts[-1] not in VALID_FILTERS:\n            filter_type = \"content\"\n        else:\n            filter_type = parts.pop()\n\n        return (field, filter_type)\n\n\nclass SQ(Q, SearchNode):\n    \"\"\"\n    Manages an individual condition within a query.\n\n    Most often, this will be a lookup to ensure that a certain word or phrase\n    appears in the documents being indexed. However, it also supports filtering\n    types (such as 'lt', 'gt', 'in' and others) for more complex lookups.\n    \"\"\"\n\n    pass\n\n\nclass BaseSearchQuery(object):\n    \"\"\"\n    A base class for handling the query itself.\n\n    This class acts as an intermediary between the ``SearchQuerySet`` and the\n    ``SearchBackend`` itself.\n\n    The ``SearchQuery`` object maintains a tree of ``SQ`` objects. Each ``SQ``\n    object supports what field it looks up against, what kind of lookup (i.e.\n    the __'s), what value it's looking for, if it's a AND/OR/NOT and tracks\n    any children it may have. 
The ``SearchQuery.build_query`` method starts with\n    the root of the tree, building part of the final query at each node until\n    the full final query is ready for the ``SearchBackend``.\n\n    Backends should extend this class and provide implementations for\n    ``build_query_fragment``, ``clean`` and ``run``. See the ``solr`` backend for an example\n    implementation.\n    \"\"\"\n\n    def __init__(self, using=DEFAULT_ALIAS):\n        self.query_filter = SearchNode()\n        self.order_by = []\n        self.models = set()\n        self.boost = {}\n        self.start_offset = 0\n        self.end_offset = None\n        self.highlight = False\n        self.facets = {}\n        self.date_facets = {}\n        self.query_facets = []\n        self.narrow_queries = set()\n        #: If defined, fields should be a list of field names - no other values\n        #: will be retrieved so the caller must be careful to include django_ct\n        #: and django_id when using code which expects those to be included in\n        #: the results\n        self.fields = []\n        # Geospatial-related information\n        self.within = {}\n        self.dwithin = {}\n        self.distance_point = {}\n        # Internal.\n        self._raw_query = None\n        self._raw_query_params = {}\n        self._more_like_this = False\n        self._mlt_instance = None\n        self._results = None\n        self._hit_count = None\n        self._facet_counts = None\n        self._stats = None\n        self._spelling_suggestion = SPELLING_SUGGESTION_HAS_NOT_RUN\n        self.spelling_query = None\n        self.result_class = SearchResult\n        self.stats = {}\n        from haystack import connections\n\n        self._using = using\n        self.backend = connections[self._using].get_backend()\n\n    def __str__(self):\n        return self.build_query()\n\n    def __getstate__(self):\n        \"\"\"For pickling.\"\"\"\n        obj_dict = self.__dict__.copy()\n        del 
obj_dict[\"backend\"]\n        return obj_dict\n\n    def __setstate__(self, obj_dict):\n        \"\"\"For unpickling.\"\"\"\n        from haystack import connections\n\n        self.__dict__.update(obj_dict)\n        self.backend = connections[self._using].get_backend()\n\n    def has_run(self):\n        \"\"\"Indicates if any query has been been run.\"\"\"\n        return None not in (self._results, self._hit_count)\n\n    def build_params(self, spelling_query=None):\n        \"\"\"Generates a list of params to use when searching.\"\"\"\n        kwargs = {\"start_offset\": self.start_offset}\n\n        if self.order_by:\n            kwargs[\"sort_by\"] = self.order_by\n\n        if self.end_offset is not None:\n            kwargs[\"end_offset\"] = self.end_offset\n\n        if self.highlight:\n            kwargs[\"highlight\"] = self.highlight\n\n        if self.facets:\n            kwargs[\"facets\"] = self.facets\n\n        if self.date_facets:\n            kwargs[\"date_facets\"] = self.date_facets\n\n        if self.query_facets:\n            kwargs[\"query_facets\"] = self.query_facets\n\n        if self.narrow_queries:\n            kwargs[\"narrow_queries\"] = self.narrow_queries\n\n        if spelling_query:\n            kwargs[\"spelling_query\"] = spelling_query\n        elif self.spelling_query:\n            kwargs[\"spelling_query\"] = self.spelling_query\n\n        if self.boost:\n            kwargs[\"boost\"] = self.boost\n\n        if self.within:\n            kwargs[\"within\"] = self.within\n\n        if self.dwithin:\n            kwargs[\"dwithin\"] = self.dwithin\n\n        if self.distance_point:\n            kwargs[\"distance_point\"] = self.distance_point\n\n        if self.result_class:\n            kwargs[\"result_class\"] = self.result_class\n\n        if self.fields:\n            kwargs[\"fields\"] = self.fields\n\n        if self.models:\n            kwargs[\"models\"] = self.models\n\n        return kwargs\n\n    def run(self, 
spelling_query=None, **kwargs):\n        \"\"\"Builds and executes the query. Returns a list of search results.\"\"\"\n        final_query = self.build_query()\n        search_kwargs = self.build_params(spelling_query=spelling_query)\n\n        if kwargs:\n            search_kwargs.update(kwargs)\n\n        results = self.backend.search(final_query, **search_kwargs)\n        self._results = results.get(\"results\", [])\n        self._hit_count = results.get(\"hits\", 0)\n        self._facet_counts = self.post_process_facets(results)\n        self._spelling_suggestion = results.get(\"spelling_suggestion\", None)\n\n    def run_mlt(self, **kwargs):\n        \"\"\"\n        Executes the More Like This. Returns a list of search results similar\n        to the provided document (and optionally query).\n        \"\"\"\n        if self._more_like_this is False or self._mlt_instance is None:\n            raise MoreLikeThisError(\n                \"No instance was provided to determine 'More Like This' results.\"\n            )\n\n        search_kwargs = {\"result_class\": self.result_class}\n\n        if self.models:\n            search_kwargs[\"models\"] = self.models\n\n        if kwargs:\n            search_kwargs.update(kwargs)\n\n        additional_query_string = self.build_query()\n        results = self.backend.more_like_this(\n            self._mlt_instance, additional_query_string, **search_kwargs\n        )\n        self._results = results.get(\"results\", [])\n        self._hit_count = results.get(\"hits\", 0)\n\n    def run_raw(self, **kwargs):\n        \"\"\"Executes a raw query. 
Returns a list of search results.\"\"\"\n        search_kwargs = self.build_params()\n        search_kwargs.update(self._raw_query_params)\n\n        if kwargs:\n            search_kwargs.update(kwargs)\n\n        results = self.backend.search(self._raw_query, **search_kwargs)\n        self._results = results.get(\"results\", [])\n        self._hit_count = results.get(\"hits\", 0)\n        self._facet_counts = results.get(\"facets\", {})\n        self._spelling_suggestion = results.get(\"spelling_suggestion\", None)\n\n    def get_count(self):\n        \"\"\"\n        Returns the number of results the backend found for the query.\n\n        If the query has not been run, this will execute the query and store\n        the results.\n        \"\"\"\n        if self._hit_count is None:\n            # Limit the slice to 1 so we get a count without consuming\n            # everything.\n            if not self.end_offset:\n                self.end_offset = 1\n\n            if self._more_like_this:\n                # Special case for MLT.\n                self.run_mlt()\n            elif self._raw_query:\n                # Special case for raw queries.\n                self.run_raw()\n            else:\n                self.run()\n\n        return self._hit_count\n\n    def get_results(self, **kwargs):\n        \"\"\"\n        Returns the results received from the backend.\n\n        If the query has not been run, this will execute the query and store\n        the results.\n        \"\"\"\n        if self._results is None:\n            if self._more_like_this:\n                # Special case for MLT.\n                self.run_mlt(**kwargs)\n            elif self._raw_query:\n                # Special case for raw queries.\n                self.run_raw(**kwargs)\n            else:\n                self.run(**kwargs)\n\n        return self._results\n\n    def get_facet_counts(self):\n        \"\"\"\n        Returns the facet counts received from the backend.\n\n        If 
the query has not been run, this will execute the query and store\n        the results.\n        \"\"\"\n        if self._facet_counts is None:\n            self.run()\n\n        return self._facet_counts\n\n    def get_stats(self):\n        \"\"\"\n        Returns the stats received from the backend.\n\n        If the query has not been run, this will execute the query and store\n        the results\n        \"\"\"\n        if self._stats is None:\n            self.run()\n        return self._stats\n\n    def set_spelling_query(self, spelling_query):\n        self.spelling_query = spelling_query\n\n    def get_spelling_suggestion(self, preferred_query=None):\n        \"\"\"\n        Returns the spelling suggestion received from the backend.\n\n        If the query has not been run, this will execute the query and store\n        the results.\n        \"\"\"\n        if self._spelling_suggestion is SPELLING_SUGGESTION_HAS_NOT_RUN:\n            self.run(spelling_query=preferred_query)\n\n        return self._spelling_suggestion\n\n    def boost_fragment(self, boost_word, boost_value):\n        \"\"\"Generates query fragment for boosting a single word/value pair.\"\"\"\n        return \"%s^%s\" % (boost_word, boost_value)\n\n    def matching_all_fragment(self):\n        \"\"\"Generates the query that matches all documents.\"\"\"\n        return \"*\"\n\n    def build_query(self):\n        \"\"\"\n        Interprets the collected query metadata and builds the final query to\n        be sent to the backend.\n        \"\"\"\n        final_query = self.query_filter.as_query_string(self.build_query_fragment)\n\n        if not final_query:\n            # Match all.\n            final_query = self.matching_all_fragment()\n\n        if self.boost:\n            boost_list = []\n\n            for boost_word, boost_value in self.boost.items():\n                boost_list.append(self.boost_fragment(boost_word, boost_value))\n\n            final_query = \"%s %s\" % (final_query, 
\" \".join(boost_list))\n\n        return final_query\n\n    def combine(self, rhs, connector=SQ.AND):\n        if connector == SQ.AND:\n            self.add_filter(rhs.query_filter)\n        elif connector == SQ.OR:\n            self.add_filter(rhs.query_filter, use_or=True)\n\n    # Methods for backends to implement.\n\n    def build_query_fragment(self, field, filter_type, value):\n        \"\"\"\n        Generates a query fragment from a field, filter type and a value.\n\n        Must be implemented in backends as this will be highly backend specific.\n        \"\"\"\n        raise NotImplementedError(\n            \"Subclasses must provide a way to generate query fragments via the 'build_query_fragment' method.\"\n        )\n\n    # Standard methods to alter the query.\n\n    def clean(self, query_fragment):\n        \"\"\"\n        Provides a mechanism for sanitizing user input before presenting the\n        value to the backend.\n\n        A basic (override-able) implementation is provided.\n        \"\"\"\n        if not isinstance(query_fragment, str):\n            return query_fragment\n\n        words = query_fragment.split()\n        cleaned_words = []\n\n        for word in words:\n            if word in self.backend.RESERVED_WORDS:\n                word = word.replace(word, word.lower())\n\n            for char in self.backend.RESERVED_CHARACTERS:\n                word = word.replace(char, \"\\\\%s\" % char)\n\n            cleaned_words.append(word)\n\n        return \" \".join(cleaned_words)\n\n    def build_not_query(self, query_string):\n        if \" \" in query_string:\n            query_string = \"(%s)\" % query_string\n\n        return \"NOT %s\" % query_string\n\n    def build_exact_query(self, query_string):\n        return '\"%s\"' % query_string\n\n    def add_filter(self, query_filter, use_or=False):\n        \"\"\"\n        Adds a SQ to the current query.\n        \"\"\"\n        if use_or:\n            connector = SQ.OR\n        else:\n  
          connector = SQ.AND\n\n        if (\n            self.query_filter\n            and query_filter.connector != connector\n            and len(query_filter) > 1\n        ):\n            self.query_filter.start_subtree(connector)\n            subtree = True\n        else:\n            subtree = False\n\n        for child in query_filter.children:\n            if isinstance(child, tree.Node):\n                self.query_filter.start_subtree(connector)\n                self.add_filter(child)\n                self.query_filter.end_subtree()\n            else:\n                expression, value = child\n                self.query_filter.add((expression, value), connector)\n\n            connector = query_filter.connector\n\n        if query_filter.negated:\n            self.query_filter.negate()\n\n        if subtree:\n            self.query_filter.end_subtree()\n\n    def add_order_by(self, field):\n        \"\"\"Orders the search result by a field.\"\"\"\n        self.order_by.append(field)\n\n    def clear_order_by(self):\n        \"\"\"\n        Clears out all ordering that has been already added, reverting the\n        query to relevancy.\n        \"\"\"\n        self.order_by = []\n\n    def add_model(self, model):\n        \"\"\"\n        Restricts the query requiring matches in the given model.\n\n        This builds upon previous additions, so you can limit to multiple models\n        by chaining this method several times.\n        \"\"\"\n        if not isinstance(model, ModelBase):\n            raise AttributeError(\n                \"The model being added to the query must derive from Model.\"\n            )\n\n        self.models.add(model)\n\n    def set_limits(self, low=None, high=None):\n        \"\"\"Restricts the query by altering either the start, end or both offsets.\"\"\"\n        if low is not None:\n            self.start_offset = int(low)\n\n        if high is not None:\n            self.end_offset = int(high)\n\n    def 
clear_limits(self):\n        \"\"\"Clears any existing limits.\"\"\"\n        self.start_offset, self.end_offset = 0, None\n\n    def add_boost(self, term, boost_value):\n        \"\"\"Adds a boosted term and the amount to boost it to the query.\"\"\"\n        self.boost[term] = boost_value\n\n    def raw_search(self, query_string, **kwargs):\n        \"\"\"\n        Runs a raw query (no parsing) against the backend.\n\n        This method causes the SearchQuery to ignore the standard query\n        generating facilities, running only what was provided instead.\n\n        Note that any kwargs passed along will override anything provided\n        to the rest of the ``SearchQuerySet``.\n        \"\"\"\n        self._raw_query = query_string\n        self._raw_query_params = kwargs\n\n    def more_like_this(self, model_instance):\n        \"\"\"\n        Allows backends with support for \"More Like This\" to return results\n        similar to the provided instance.\n        \"\"\"\n        self._more_like_this = True\n        self._mlt_instance = model_instance\n\n    def add_stats_query(self, stats_field, stats_facets):\n        \"\"\"Adds stats and stats_facets queries for the Solr backend.\"\"\"\n        self.stats[stats_field] = stats_facets\n\n    def add_highlight(self, **kwargs):\n        \"\"\"Adds highlighting to the search results.\"\"\"\n        self.highlight = kwargs or True\n\n    def add_within(self, field, point_1, point_2):\n        \"\"\"Adds bounding box parameters to search query.\"\"\"\n        from haystack.utils.geo import ensure_point\n\n        self.within = {\n            \"field\": field,\n            \"point_1\": ensure_point(point_1),\n            \"point_2\": ensure_point(point_2),\n        }\n\n    def add_dwithin(self, field, point, distance):\n        \"\"\"Adds radius-based parameters to search query.\"\"\"\n        from haystack.utils.geo import ensure_distance, ensure_point\n\n        self.dwithin = {\n            \"field\": 
field,\n            \"point\": ensure_point(point),\n            \"distance\": ensure_distance(distance),\n        }\n\n    def add_distance(self, field, point):\n        \"\"\"\n        Denotes that results should include distance measurements from the\n        point passed in.\n        \"\"\"\n        from haystack.utils.geo import ensure_point\n\n        self.distance_point = {\"field\": field, \"point\": ensure_point(point)}\n\n    def add_field_facet(self, field, **options):\n        \"\"\"Adds a regular facet on a field.\"\"\"\n        from haystack import connections\n\n        field_name = (\n            connections[self._using].get_unified_index().get_facet_fieldname(field)\n        )\n        self.facets[field_name] = options.copy()\n\n    def add_date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):\n        \"\"\"Adds a date-based facet on a field.\"\"\"\n        from haystack import connections\n\n        if gap_by not in VALID_GAPS:\n            raise FacetingError(\n                \"The gap_by ('%s') must be one of the following: %s.\"\n                % (gap_by, \", \".join(VALID_GAPS))\n            )\n\n        details = {\n            \"start_date\": start_date,\n            \"end_date\": end_date,\n            \"gap_by\": gap_by,\n            \"gap_amount\": gap_amount,\n        }\n        self.date_facets[\n            connections[self._using].get_unified_index().get_facet_fieldname(field)\n        ] = details\n\n    def add_query_facet(self, field, query):\n        \"\"\"Adds a query facet on a field.\"\"\"\n        from haystack import connections\n\n        self.query_facets.append(\n            (\n                connections[self._using].get_unified_index().get_facet_fieldname(field),\n                query,\n            )\n        )\n\n    def add_narrow_query(self, query):\n        \"\"\"\n        Narrows a search to a subset of all documents per the query.\n\n        Generally used in conjunction with faceting.\n        
\"\"\"\n        self.narrow_queries.add(query)\n\n    def set_result_class(self, klass):\n        \"\"\"\n        Sets the result class to use for results.\n\n        Overrides any previous usages. If ``None`` is provided, Haystack will\n        revert back to the default ``SearchResult`` object.\n        \"\"\"\n        if klass is None:\n            klass = SearchResult\n\n        self.result_class = klass\n\n    def post_process_facets(self, results):\n        # Handle renaming the facet fields. Undecorate and all that.\n        from haystack import connections\n\n        revised_facets = {}\n        field_data = connections[self._using].get_unified_index().all_searchfields()\n\n        for facet_type, field_details in results.get(\"facets\", {}).items():\n            temp_facets = {}\n\n            for field, field_facets in field_details.items():\n                fieldname = field\n                if field in field_data and hasattr(\n                    field_data[field], \"get_facet_for_name\"\n                ):\n                    fieldname = field_data[field].get_facet_for_name()\n\n                temp_facets[fieldname] = field_facets\n\n            revised_facets[facet_type] = temp_facets\n\n        return revised_facets\n\n    def using(self, using=None):\n        \"\"\"\n        Allows for overriding which connection should be used. This\n        disables the use of routers when performing the query.\n\n        If ``None`` is provided, it has no effect on what backend is used.\n        \"\"\"\n        return self._clone(using=using)\n\n    def _reset(self):\n        \"\"\"\n        Resets the instance's internal state to appear as though no query has\n        been run before. 
Only need to tweak a few variables we check.\n        \"\"\"\n        self._results = None\n        self._hit_count = None\n        self._facet_counts = None\n        self._spelling_suggestion = SPELLING_SUGGESTION_HAS_NOT_RUN\n\n    def _clone(self, klass=None, using=None):\n        if using is None:\n            using = self._using\n        else:\n            from haystack import connections\n\n            klass = connections[using].query\n\n        if klass is None:\n            klass = self.__class__\n\n        clone = klass(using=using)\n        clone.query_filter = deepcopy(self.query_filter)\n        clone.order_by = self.order_by[:]\n        clone.models = self.models.copy()\n        clone.boost = self.boost.copy()\n        clone.highlight = self.highlight\n        clone.stats = self.stats.copy()\n        clone.facets = self.facets.copy()\n        clone.date_facets = self.date_facets.copy()\n        clone.query_facets = self.query_facets[:]\n        clone.narrow_queries = self.narrow_queries.copy()\n        clone.start_offset = self.start_offset\n        clone.end_offset = self.end_offset\n        clone.result_class = self.result_class\n        clone.within = self.within.copy()\n        clone.dwithin = self.dwithin.copy()\n        clone.distance_point = self.distance_point.copy()\n        clone._raw_query = self._raw_query\n        clone._raw_query_params = self._raw_query_params\n        clone.spelling_query = self.spelling_query\n        clone._more_like_this = self._more_like_this\n        clone._mlt_instance = self._mlt_instance\n\n        return clone\n\n\nclass BaseEngine(object):\n    backend = BaseSearchBackend\n    query = BaseSearchQuery\n    unified_index = UnifiedIndex\n\n    def __init__(self, using=None):\n        if using is None:\n            using = DEFAULT_ALIAS\n\n        self.using = using\n        self.options = settings.HAYSTACK_CONNECTIONS.get(self.using, {})\n        self.queries = []\n        self._index = None\n        
self._backend = None\n\n    def get_backend(self):\n        if self._backend is None:\n            self._backend = self.backend(self.using, **self.options)\n        return self._backend\n\n    def reset_sessions(self):\n        \"\"\"Reset any transient connections, file handles, etc.\"\"\"\n        self._backend = None\n\n    def get_query(self):\n        return self.query(using=self.using)\n\n    def reset_queries(self):\n        del self.queries[:]\n\n    def get_unified_index(self):\n        if self._index is None:\n            self._index = self.unified_index(self.options.get(\"EXCLUDED_INDEXES\", []))\n\n        return self._index\n"
  },
  {
    "path": "haystack/backends/elasticsearch2_backend.py",
    "content": "import datetime\nimport warnings\n\nfrom django.conf import settings\n\nfrom haystack.backends import BaseEngine\nfrom haystack.backends.elasticsearch_backend import (\n    ElasticsearchSearchBackend,\n    ElasticsearchSearchQuery,\n)\nfrom haystack.constants import DJANGO_CT\nfrom haystack.exceptions import MissingDependency\nfrom haystack.utils import get_identifier, get_model_ct\n\ntry:\n    import elasticsearch\n\n    if not ((2, 0, 0) <= elasticsearch.__version__ < (3, 0, 0)):\n        raise ImportError\n    from elasticsearch.helpers import bulk, scan\n\n    warnings.warn(\n        \"ElasticSearch 2.x support deprecated, will be removed in 4.0\",\n        DeprecationWarning,\n    )\nexcept ImportError:\n    raise MissingDependency(\n        \"The 'elasticsearch2' backend requires the \\\n                            installation of 'elasticsearch>=2.0.0,<3.0.0'. \\\n                            Please refer to the documentation.\"\n    )\n\n\nclass Elasticsearch2SearchBackend(ElasticsearchSearchBackend):\n    def __init__(self, connection_alias, **connection_options):\n        super().__init__(connection_alias, **connection_options)\n        self.content_field_name = None\n\n    def clear(self, models=None, commit=True):\n        \"\"\"\n        Clears the backend of all documents/objects for a collection of models.\n\n        :param models: List or tuple of models to clear.\n        :param commit: Not used.\n        \"\"\"\n        if models is not None:\n            assert isinstance(models, (list, tuple))\n\n        try:\n            if models is None:\n                self.conn.indices.delete(index=self.index_name, ignore=404)\n                self.setup_complete = False\n                self.existing_mapping = {}\n                self.content_field_name = None\n            else:\n                models_to_delete = []\n\n                for model in models:\n                    models_to_delete.append(\"%s:%s\" % (DJANGO_CT, 
get_model_ct(model)))\n\n                # Delete using scroll API\n                query = {\n                    \"query\": {\"query_string\": {\"query\": \" OR \".join(models_to_delete)}}\n                }\n                generator = scan(\n                    self.conn,\n                    query=query,\n                    index=self.index_name,\n                    doc_type=\"modelresult\",\n                )\n                actions = (\n                    {\"_op_type\": \"delete\", \"_id\": doc[\"_id\"]} for doc in generator\n                )\n                bulk(\n                    self.conn,\n                    actions=actions,\n                    index=self.index_name,\n                    doc_type=\"modelresult\",\n                )\n                self.conn.indices.refresh(index=self.index_name)\n\n        except elasticsearch.TransportError as e:\n            if not self.silently_fail:\n                raise\n\n            if models is not None:\n                self.log.error(\n                    \"Failed to clear Elasticsearch index of models '%s': %s\",\n                    \",\".join(models_to_delete),\n                    e,\n                    exc_info=True,\n                )\n            else:\n                self.log.error(\n                    \"Failed to clear Elasticsearch index: %s\", e, exc_info=True\n                )\n\n    def build_search_kwargs(\n        self,\n        query_string,\n        sort_by=None,\n        start_offset=0,\n        end_offset=None,\n        fields=\"\",\n        highlight=False,\n        facets=None,\n        date_facets=None,\n        query_facets=None,\n        narrow_queries=None,\n        spelling_query=None,\n        within=None,\n        dwithin=None,\n        distance_point=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n    ):\n        kwargs = super().build_search_kwargs(\n            query_string,\n            sort_by,\n            
start_offset,\n            end_offset,\n            fields,\n            highlight,\n            spelling_query=spelling_query,\n            within=within,\n            dwithin=dwithin,\n            distance_point=distance_point,\n            models=models,\n            limit_to_registered_models=limit_to_registered_models,\n            result_class=result_class,\n        )\n\n        filters = []\n        if start_offset is not None:\n            kwargs[\"from\"] = start_offset\n\n        if end_offset is not None:\n            kwargs[\"size\"] = end_offset - start_offset\n\n        if narrow_queries is None:\n            narrow_queries = set()\n\n        if facets is not None:\n            kwargs.setdefault(\"aggs\", {})\n\n            for facet_fieldname, extra_options in facets.items():\n                facet_options = {\n                    \"meta\": {\"_type\": \"terms\"},\n                    \"terms\": {\"field\": facet_fieldname},\n                }\n                if \"order\" in extra_options:\n                    facet_options[\"meta\"][\"order\"] = extra_options.pop(\"order\")\n                # Special cases for options applied at the facet level (not the terms level).\n                if extra_options.pop(\"global_scope\", False):\n                    # Renamed \"global_scope\" since \"global\" is a python keyword.\n                    facet_options[\"global\"] = True\n                if \"facet_filter\" in extra_options:\n                    facet_options[\"facet_filter\"] = extra_options.pop(\"facet_filter\")\n                facet_options[\"terms\"].update(extra_options)\n                kwargs[\"aggs\"][facet_fieldname] = facet_options\n\n        if date_facets is not None:\n            kwargs.setdefault(\"aggs\", {})\n\n            for facet_fieldname, value in date_facets.items():\n                # Need to detect on gap_by & only add amount if it's more than one.\n                interval = value.get(\"gap_by\").lower()\n\n                # 
Need to detect on amount (can't be applied on months or years).\n                if value.get(\"gap_amount\", 1) != 1 and interval not in (\n                    \"month\",\n                    \"year\",\n                ):\n                    # Just the first character is valid for use.\n                    interval = \"%s%s\" % (value[\"gap_amount\"], interval[:1])\n\n                kwargs[\"aggs\"][facet_fieldname] = {\n                    \"meta\": {\"_type\": \"date_histogram\"},\n                    \"date_histogram\": {\"field\": facet_fieldname, \"interval\": interval},\n                    \"aggs\": {\n                        facet_fieldname: {\n                            \"date_range\": {\n                                \"field\": facet_fieldname,\n                                \"ranges\": [\n                                    {\n                                        \"from\": self._from_python(\n                                            value.get(\"start_date\")\n                                        ),\n                                        \"to\": self._from_python(value.get(\"end_date\")),\n                                    }\n                                ],\n                            }\n                        }\n                    },\n                }\n\n        if query_facets is not None:\n            kwargs.setdefault(\"aggs\", {})\n\n            for facet_fieldname, value in query_facets:\n                kwargs[\"aggs\"][facet_fieldname] = {\n                    \"meta\": {\"_type\": \"query\"},\n                    \"filter\": {\"query_string\": {\"query\": value}},\n                }\n\n        for q in narrow_queries:\n            filters.append({\"query_string\": {\"query\": q}})\n\n        # if we want to filter, change the query type to filteres\n        if filters:\n            kwargs[\"query\"] = {\"filtered\": {\"query\": kwargs.pop(\"query\")}}\n            filtered = kwargs[\"query\"][\"filtered\"]\n            
if \"filter\" in filtered:\n                if \"bool\" in filtered[\"filter\"].keys():\n                    another_filters = kwargs[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\n                        \"must\"\n                    ]\n                else:\n                    another_filters = [kwargs[\"query\"][\"filtered\"][\"filter\"]]\n            else:\n                another_filters = filters\n\n            if len(another_filters) == 1:\n                kwargs[\"query\"][\"filtered\"][\"filter\"] = another_filters[0]\n            else:\n                kwargs[\"query\"][\"filtered\"][\"filter\"] = {\n                    \"bool\": {\"must\": another_filters}\n                }\n\n        return kwargs\n\n    def more_like_this(\n        self,\n        model_instance,\n        additional_query_string=None,\n        start_offset=0,\n        end_offset=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **kwargs\n    ):\n        from haystack import connections\n\n        if not self.setup_complete:\n            self.setup()\n\n        # Deferred models will have a different class (\"RealClass_Deferred_fieldname\")\n        # which won't be in our registry:\n        model_klass = model_instance._meta.concrete_model\n\n        index = (\n            connections[self.connection_alias]\n            .get_unified_index()\n            .get_index(model_klass)\n        )\n        field_name = index.get_content_field()\n        params = {}\n\n        if start_offset is not None:\n            params[\"from_\"] = start_offset\n\n        if end_offset is not None:\n            params[\"size\"] = end_offset - start_offset\n\n        doc_id = get_identifier(model_instance)\n\n        try:\n            # More like this Query\n            # https://www.elastic.co/guide/en/elasticsearch/reference/2.2/query-dsl-mlt-query.html\n            mlt_query = {\n                \"query\": {\n                    
\"more_like_this\": {\n                        \"fields\": [field_name],\n                        \"like\": [{\"_id\": doc_id}],\n                    }\n                }\n            }\n\n            narrow_queries = []\n\n            if additional_query_string and additional_query_string != \"*:*\":\n                additional_filter = {\n                    \"query\": {\"query_string\": {\"query\": additional_query_string}}\n                }\n                narrow_queries.append(additional_filter)\n\n            if limit_to_registered_models is None:\n                limit_to_registered_models = getattr(\n                    settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n                )\n\n            if models and len(models):\n                model_choices = sorted(get_model_ct(model) for model in models)\n            elif limit_to_registered_models:\n                # Using narrow queries, limit the results to only models handled\n                # with the current routers.\n                model_choices = self.build_models_list()\n            else:\n                model_choices = []\n\n            if len(model_choices) > 0:\n                model_filter = {\"terms\": {DJANGO_CT: model_choices}}\n                narrow_queries.append(model_filter)\n\n            if len(narrow_queries) > 0:\n                mlt_query = {\n                    \"query\": {\n                        \"filtered\": {\n                            \"query\": mlt_query[\"query\"],\n                            \"filter\": {\"bool\": {\"must\": list(narrow_queries)}},\n                        }\n                    }\n                }\n\n            raw_results = self.conn.search(\n                body=mlt_query,\n                index=self.index_name,\n                doc_type=\"modelresult\",\n                _source=True,\n                **params\n            )\n        except elasticsearch.TransportError as e:\n            if not self.silently_fail:\n                
raise\n\n            self.log.error(\n                \"Failed to fetch More Like This from Elasticsearch for document '%s': %s\",\n                doc_id,\n                e,\n                exc_info=True,\n            )\n            raw_results = {}\n\n        return self._process_results(raw_results, result_class=result_class)\n\n    def _process_results(\n        self,\n        raw_results,\n        highlight=False,\n        result_class=None,\n        distance_point=None,\n        geo_sort=False,\n    ):\n        results = super()._process_results(\n            raw_results, highlight, result_class, distance_point, geo_sort\n        )\n        facets = {}\n        if \"aggregations\" in raw_results:\n            facets = {\"fields\": {}, \"dates\": {}, \"queries\": {}}\n\n            for facet_fieldname, facet_info in raw_results[\"aggregations\"].items():\n                facet_type = facet_info[\"meta\"][\"_type\"]\n                if facet_type == \"terms\":\n                    facets[\"fields\"][facet_fieldname] = [\n                        (individual[\"key\"], individual[\"doc_count\"])\n                        for individual in facet_info[\"buckets\"]\n                    ]\n                    if \"order\" in facet_info[\"meta\"]:\n                        if facet_info[\"meta\"][\"order\"] == \"reverse_count\":\n                            srt = sorted(\n                                facets[\"fields\"][facet_fieldname], key=lambda x: x[1]\n                            )\n                            facets[\"fields\"][facet_fieldname] = srt\n                elif facet_type == \"date_histogram\":\n                    # Elasticsearch provides UTC timestamps with an extra three\n                    # decimals of precision, which datetime barfs on.\n                    facets[\"dates\"][facet_fieldname] = [\n                        (\n                            datetime.datetime.utcfromtimestamp(\n                                individual[\"key\"] / 
1000\n                            ),\n                            individual[\"doc_count\"],\n                        )\n                        for individual in facet_info[\"buckets\"]\n                    ]\n                elif facet_type == \"query\":\n                    facets[\"queries\"][facet_fieldname] = facet_info[\"doc_count\"]\n        results[\"facets\"] = facets\n        return results\n\n\nclass Elasticsearch2SearchQuery(ElasticsearchSearchQuery):\n    pass\n\n\nclass Elasticsearch2SearchEngine(BaseEngine):\n    backend = Elasticsearch2SearchBackend\n    query = Elasticsearch2SearchQuery\n"
  },
  {
    "path": "haystack/backends/elasticsearch5_backend.py",
    "content": "import datetime\nimport warnings\n\nfrom django.conf import settings\n\nimport haystack\nfrom haystack.backends import BaseEngine\nfrom haystack.backends.elasticsearch_backend import (\n    ElasticsearchSearchBackend,\n    ElasticsearchSearchQuery,\n)\nfrom haystack.constants import DEFAULT_OPERATOR, DJANGO_CT, FUZZINESS\nfrom haystack.exceptions import MissingDependency\nfrom haystack.utils import get_identifier, get_model_ct\n\ntry:\n    import elasticsearch\n\n    if not ((5, 0, 0) <= elasticsearch.__version__ < (6, 0, 0)):\n        raise ImportError\n    from elasticsearch.helpers import bulk, scan\nexcept ImportError:\n    raise MissingDependency(\n        \"The 'elasticsearch5' backend requires the \\\n                            installation of 'elasticsearch>=5.0.0,<6.0.0'. \\\n                            Please refer to the documentation.\"\n    )\n\n\nclass Elasticsearch5SearchBackend(ElasticsearchSearchBackend):\n    def __init__(self, connection_alias, **connection_options):\n        super().__init__(connection_alias, **connection_options)\n        self.content_field_name = None\n\n    def clear(self, models=None, commit=True):\n        \"\"\"\n        Clears the backend of all documents/objects for a collection of models.\n\n        :param models: List or tuple of models to clear.\n        :param commit: Not used.\n        \"\"\"\n        if models is not None:\n            assert isinstance(models, (list, tuple))\n\n        try:\n            if models is None:\n                self.conn.indices.delete(index=self.index_name, ignore=404)\n                self.setup_complete = False\n                self.existing_mapping = {}\n                self.content_field_name = None\n            else:\n                models_to_delete = []\n\n                for model in models:\n                    models_to_delete.append(\"%s:%s\" % (DJANGO_CT, get_model_ct(model)))\n\n                # Delete using scroll API\n                query = {\n         
           \"query\": {\"query_string\": {\"query\": \" OR \".join(models_to_delete)}}\n                }\n                generator = scan(\n                    self.conn,\n                    query=query,\n                    index=self.index_name,\n                    doc_type=\"modelresult\",\n                )\n                actions = (\n                    {\"_op_type\": \"delete\", \"_id\": doc[\"_id\"]} for doc in generator\n                )\n                bulk(\n                    self.conn,\n                    actions=actions,\n                    index=self.index_name,\n                    doc_type=\"modelresult\",\n                )\n                self.conn.indices.refresh(index=self.index_name)\n\n        except elasticsearch.TransportError as e:\n            if not self.silently_fail:\n                raise\n\n            if models is not None:\n                self.log.error(\n                    \"Failed to clear Elasticsearch index of models '%s': %s\",\n                    \",\".join(models_to_delete),\n                    e,\n                    exc_info=True,\n                )\n            else:\n                self.log.error(\n                    \"Failed to clear Elasticsearch index: %s\", e, exc_info=True\n                )\n\n    def build_search_kwargs(\n        self,\n        query_string,\n        sort_by=None,\n        start_offset=0,\n        end_offset=None,\n        fields=\"\",\n        highlight=False,\n        facets=None,\n        date_facets=None,\n        query_facets=None,\n        narrow_queries=None,\n        spelling_query=None,\n        within=None,\n        dwithin=None,\n        distance_point=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **extra_kwargs\n    ):\n        index = haystack.connections[self.connection_alias].get_unified_index()\n        content_field = index.document_field\n\n        if query_string == \"*:*\":\n            kwargs = 
{\"query\": {\"match_all\": {}}}\n        else:\n            kwargs = {\n                \"query\": {\n                    \"query_string\": {\n                        \"default_field\": content_field,\n                        \"default_operator\": DEFAULT_OPERATOR,\n                        \"query\": query_string,\n                        \"analyze_wildcard\": True,\n                        \"auto_generate_phrase_queries\": True,\n                        \"fuzziness\": FUZZINESS,\n                    }\n                }\n            }\n\n        filters = []\n\n        if fields:\n            if isinstance(fields, (list, set)):\n                fields = \" \".join(fields)\n\n            kwargs[\"stored_fields\"] = fields\n\n        if sort_by is not None:\n            order_list = []\n            for field, direction in sort_by:\n                if field == \"distance\" and distance_point:\n                    # Do the geo-enabled sort.\n                    lng, lat = distance_point[\"point\"].coords\n                    sort_kwargs = {\n                        \"_geo_distance\": {\n                            distance_point[\"field\"]: [lng, lat],\n                            \"order\": direction,\n                            \"unit\": \"km\",\n                        }\n                    }\n                else:\n                    if field == \"distance\":\n                        warnings.warn(\n                            \"In order to sort by distance, you must call the '.distance(...)' method.\"\n                        )\n\n                    # Regular sorting.\n                    sort_kwargs = {field: {\"order\": direction}}\n\n                order_list.append(sort_kwargs)\n\n            kwargs[\"sort\"] = order_list\n\n        # From/size offsets don't seem to work right in Elasticsearch's DSL. 
:/\n        # if start_offset is not None:\n        #     kwargs['from'] = start_offset\n\n        # if end_offset is not None:\n        #     kwargs['size'] = end_offset - start_offset\n\n        if highlight:\n            # `highlight` can either be True or a dictionary containing custom parameters\n            # which will be passed to the backend and may override our default settings:\n\n            kwargs[\"highlight\"] = {\"fields\": {content_field: {}}}\n\n            if isinstance(highlight, dict):\n                kwargs[\"highlight\"].update(highlight)\n\n        if self.include_spelling:\n            kwargs[\"suggest\"] = {\n                \"suggest\": {\n                    \"text\": spelling_query or query_string,\n                    \"term\": {\n                        # Using content_field here will result in suggestions of stemmed words.\n                        \"field\": \"_all\"\n                    },\n                }\n            }\n\n        if narrow_queries is None:\n            narrow_queries = set()\n\n        if facets is not None:\n            kwargs.setdefault(\"aggs\", {})\n\n            for facet_fieldname, extra_options in facets.items():\n                facet_options = {\n                    \"meta\": {\"_type\": \"terms\"},\n                    \"terms\": {\"field\": index.get_facet_fieldname(facet_fieldname)},\n                }\n                if \"order\" in extra_options:\n                    facet_options[\"meta\"][\"order\"] = extra_options.pop(\"order\")\n                # Special cases for options applied at the facet level (not the terms level).\n                if extra_options.pop(\"global_scope\", False):\n                    # Renamed \"global_scope\" since \"global\" is a python keyword.\n                    facet_options[\"global\"] = True\n                if \"facet_filter\" in extra_options:\n                    facet_options[\"facet_filter\"] = extra_options.pop(\"facet_filter\")\n                
facet_options[\"terms\"].update(extra_options)\n                kwargs[\"aggs\"][facet_fieldname] = facet_options\n\n        if date_facets is not None:\n            kwargs.setdefault(\"aggs\", {})\n\n            for facet_fieldname, value in date_facets.items():\n                # Need to detect on gap_by & only add amount if it's more than one.\n                interval = value.get(\"gap_by\").lower()\n\n                # Need to detect on amount (can't be applied on months or years).\n                if value.get(\"gap_amount\", 1) != 1 and interval not in (\n                    \"month\",\n                    \"year\",\n                ):\n                    # Just the first character is valid for use.\n                    interval = \"%s%s\" % (value[\"gap_amount\"], interval[:1])\n\n                kwargs[\"aggs\"][facet_fieldname] = {\n                    \"meta\": {\"_type\": \"date_histogram\"},\n                    \"date_histogram\": {\"field\": facet_fieldname, \"interval\": interval},\n                    \"aggs\": {\n                        facet_fieldname: {\n                            \"date_range\": {\n                                \"field\": facet_fieldname,\n                                \"ranges\": [\n                                    {\n                                        \"from\": self._from_python(\n                                            value.get(\"start_date\")\n                                        ),\n                                        \"to\": self._from_python(value.get(\"end_date\")),\n                                    }\n                                ],\n                            }\n                        }\n                    },\n                }\n\n        if query_facets is not None:\n            kwargs.setdefault(\"aggs\", {})\n\n            for facet_fieldname, value in query_facets:\n                kwargs[\"aggs\"][facet_fieldname] = {\n                    \"meta\": {\"_type\": \"query\"},\n      
              \"filter\": {\"query_string\": {\"query\": value}},\n                }\n\n        if limit_to_registered_models is None:\n            limit_to_registered_models = getattr(\n                settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n            )\n\n        if models and len(models):\n            model_choices = sorted(get_model_ct(model) for model in models)\n        elif limit_to_registered_models:\n            # Using narrow queries, limit the results to only models handled\n            # with the current routers.\n            model_choices = self.build_models_list()\n        else:\n            model_choices = []\n\n        if len(model_choices) > 0:\n            filters.append({\"terms\": {DJANGO_CT: model_choices}})\n\n        for q in narrow_queries:\n            filters.append({\"query_string\": {\"query\": q}})\n\n        if within is not None:\n            filters.append(self._build_search_query_within(within))\n\n        if dwithin is not None:\n            filters.append(self._build_search_query_dwithin(dwithin))\n\n        # if we want to filter, change the query type to bool\n        if filters:\n            kwargs[\"query\"] = {\"bool\": {\"must\": kwargs.pop(\"query\")}}\n            if len(filters) == 1:\n                kwargs[\"query\"][\"bool\"][\"filter\"] = filters[0]\n            else:\n                kwargs[\"query\"][\"bool\"][\"filter\"] = {\"bool\": {\"must\": filters}}\n\n        if extra_kwargs:\n            kwargs.update(extra_kwargs)\n\n        return kwargs\n\n    def _build_search_query_dwithin(self, dwithin):\n        lng, lat = dwithin[\"point\"].coords\n        distance = \"%(dist).6f%(unit)s\" % {\"dist\": dwithin[\"distance\"].km, \"unit\": \"km\"}\n        return {\n            \"geo_distance\": {\n                \"distance\": distance,\n                dwithin[\"field\"]: {\"lat\": lat, \"lon\": lng},\n            }\n        }\n\n    def _build_search_query_within(self, within):\n        from 
haystack.utils.geo import generate_bounding_box\n\n        ((south, west), (north, east)) = generate_bounding_box(\n            within[\"point_1\"], within[\"point_2\"]\n        )\n        return {\n            \"geo_bounding_box\": {\n                within[\"field\"]: {\n                    \"top_left\": {\"lat\": north, \"lon\": west},\n                    \"bottom_right\": {\"lat\": south, \"lon\": east},\n                }\n            }\n        }\n\n    def more_like_this(\n        self,\n        model_instance,\n        additional_query_string=None,\n        start_offset=0,\n        end_offset=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **kwargs\n    ):\n        from haystack import connections\n\n        if not self.setup_complete:\n            self.setup()\n\n        # Deferred models will have a different class (\"RealClass_Deferred_fieldname\")\n        # which won't be in our registry:\n        model_klass = model_instance._meta.concrete_model\n\n        index = (\n            connections[self.connection_alias]\n            .get_unified_index()\n            .get_index(model_klass)\n        )\n        field_name = index.get_content_field()\n        params = {}\n\n        if start_offset is not None:\n            params[\"from_\"] = start_offset\n\n        if end_offset is not None:\n            params[\"size\"] = end_offset - start_offset\n\n        doc_id = get_identifier(model_instance)\n\n        try:\n            # More like this Query\n            # https://www.elastic.co/guide/en/elasticsearch/reference/2.2/query-dsl-mlt-query.html\n            mlt_query = {\n                \"query\": {\n                    \"more_like_this\": {\n                        \"fields\": [field_name],\n                        \"like\": [{\"_id\": doc_id}],\n                    }\n                }\n            }\n\n            narrow_queries = []\n\n            if additional_query_string and 
additional_query_string != \"*:*\":\n                additional_filter = {\"query_string\": {\"query\": additional_query_string}}\n                narrow_queries.append(additional_filter)\n\n            if limit_to_registered_models is None:\n                limit_to_registered_models = getattr(\n                    settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n                )\n\n            if models and len(models):\n                model_choices = sorted(get_model_ct(model) for model in models)\n            elif limit_to_registered_models:\n                # Using narrow queries, limit the results to only models handled\n                # with the current routers.\n                model_choices = self.build_models_list()\n            else:\n                model_choices = []\n\n            if len(model_choices) > 0:\n                model_filter = {\"terms\": {DJANGO_CT: model_choices}}\n                narrow_queries.append(model_filter)\n\n            if len(narrow_queries) > 0:\n                mlt_query = {\n                    \"query\": {\n                        \"bool\": {\n                            \"must\": mlt_query[\"query\"],\n                            \"filter\": {\"bool\": {\"must\": list(narrow_queries)}},\n                        }\n                    }\n                }\n\n            raw_results = self.conn.search(\n                body=mlt_query,\n                index=self.index_name,\n                doc_type=\"modelresult\",\n                _source=True,\n                **params\n            )\n        except elasticsearch.TransportError as e:\n            if not self.silently_fail:\n                raise\n\n            self.log.error(\n                \"Failed to fetch More Like This from Elasticsearch for document '%s': %s\",\n                doc_id,\n                e,\n                exc_info=True,\n            )\n            raw_results = {}\n\n        return self._process_results(raw_results, 
result_class=result_class)\n\n    def _process_results(\n        self,\n        raw_results,\n        highlight=False,\n        result_class=None,\n        distance_point=None,\n        geo_sort=False,\n    ):\n        results = super()._process_results(\n            raw_results, highlight, result_class, distance_point, geo_sort\n        )\n        facets = {}\n        if \"aggregations\" in raw_results:\n            facets = {\"fields\": {}, \"dates\": {}, \"queries\": {}}\n\n            for facet_fieldname, facet_info in raw_results[\"aggregations\"].items():\n                facet_type = facet_info[\"meta\"][\"_type\"]\n                if facet_type == \"terms\":\n                    facets[\"fields\"][facet_fieldname] = [\n                        (individual[\"key\"], individual[\"doc_count\"])\n                        for individual in facet_info[\"buckets\"]\n                    ]\n                    if \"order\" in facet_info[\"meta\"]:\n                        if facet_info[\"meta\"][\"order\"] == \"reverse_count\":\n                            srt = sorted(\n                                facets[\"fields\"][facet_fieldname], key=lambda x: x[1]\n                            )\n                            facets[\"fields\"][facet_fieldname] = srt\n                elif facet_type == \"date_histogram\":\n                    # Elasticsearch provides UTC timestamps with an extra three\n                    # decimals of precision, which datetime barfs on.\n                    facets[\"dates\"][facet_fieldname] = [\n                        (\n                            datetime.datetime.utcfromtimestamp(\n                                individual[\"key\"] / 1000\n                            ),\n                            individual[\"doc_count\"],\n                        )\n                        for individual in facet_info[\"buckets\"]\n                    ]\n                elif facet_type == \"query\":\n                    
facets[\"queries\"][facet_fieldname] = facet_info[\"doc_count\"]\n        results[\"facets\"] = facets\n        return results\n\n\nclass Elasticsearch5SearchQuery(ElasticsearchSearchQuery):\n    def add_field_facet(self, field, **options):\n        \"\"\"Adds a regular facet on a field.\"\"\"\n        # to be renamed to the facet fieldname by build_search_kwargs later\n        self.facets[field] = options.copy()\n\n\nclass Elasticsearch5SearchEngine(BaseEngine):\n    backend = Elasticsearch5SearchBackend\n    query = Elasticsearch5SearchQuery\n"
  },
  {
    "path": "haystack/backends/elasticsearch_backend.py",
    "content": "import re\nimport warnings\nfrom datetime import datetime, timedelta\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\n\nimport haystack\nfrom haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, log_query\nfrom haystack.constants import (\n    DEFAULT_OPERATOR,\n    DJANGO_CT,\n    DJANGO_ID,\n    FUZZY_MAX_EXPANSIONS,\n    FUZZY_MIN_SIM,\n    ID,\n)\nfrom haystack.exceptions import MissingDependency, MoreLikeThisError, SkipDocument\nfrom haystack.inputs import Clean, Exact, PythonData, Raw\nfrom haystack.models import SearchResult\nfrom haystack.utils import get_identifier, get_model_ct\nfrom haystack.utils import log as logging\nfrom haystack.utils.app_loading import haystack_get_model\n\ntry:\n    import elasticsearch\n\n    if (1, 0, 0) <= elasticsearch.__version__ < (2, 0, 0):\n        warnings.warn(\n            \"ElasticSearch 1.x support deprecated, will be removed in 4.0\",\n            DeprecationWarning,\n        )\n\n    try:\n        # let's try this, for elasticsearch > 1.7.0\n        from elasticsearch.helpers import bulk\n    except ImportError:\n        # let's try this, for elasticsearch <= 1.7.0\n        from elasticsearch.helpers import bulk_index as bulk\n    from elasticsearch.exceptions import NotFoundError\nexcept ImportError:\n    raise MissingDependency(\n        \"The 'elasticsearch' backend requires the installation of 'elasticsearch'. 
Please refer to the documentation.\"\n    )\n\n\nDATETIME_REGEX = re.compile(\n    r\"^(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})T\"\n    r\"(?P<hour>\\d{2}):(?P<minute>\\d{2}):(?P<second>\\d{2})(\\.\\d+)?$\"\n)\n\n\nclass ElasticsearchSearchBackend(BaseSearchBackend):\n    # Word reserved by Elasticsearch for special use.\n    RESERVED_WORDS = (\"AND\", \"NOT\", \"OR\", \"TO\")\n\n    # Characters reserved by Elasticsearch for special use.\n    # The '\\\\' must come first, so as not to overwrite the other slash replacements.\n    RESERVED_CHARACTERS = (\n        \"\\\\\",\n        \"+\",\n        \"-\",\n        \"&&\",\n        \"||\",\n        \"!\",\n        \"(\",\n        \")\",\n        \"{\",\n        \"}\",\n        \"[\",\n        \"]\",\n        \"^\",\n        '\"',\n        \"~\",\n        \"*\",\n        \"?\",\n        \":\",\n        \"/\",\n    )\n\n    # Settings to add an n-gram & edge n-gram analyzer.\n    DEFAULT_SETTINGS = {\n        \"settings\": {\n            \"analysis\": {\n                \"analyzer\": {\n                    \"ngram_analyzer\": {\n                        \"type\": \"custom\",\n                        \"tokenizer\": \"standard\",\n                        \"filter\": [\"haystack_ngram\", \"lowercase\"],\n                    },\n                    \"edgengram_analyzer\": {\n                        \"type\": \"custom\",\n                        \"tokenizer\": \"standard\",\n                        \"filter\": [\"haystack_edgengram\", \"lowercase\"],\n                    },\n                },\n                \"tokenizer\": {\n                    \"haystack_ngram_tokenizer\": {\n                        \"type\": \"nGram\",\n                        \"min_gram\": 3,\n                        \"max_gram\": 15,\n                    },\n                    \"haystack_edgengram_tokenizer\": {\n                        \"type\": \"edgeNGram\",\n                        \"min_gram\": 2,\n                        \"max_gram\": 
15,\n                        \"side\": \"front\",\n                    },\n                },\n                \"filter\": {\n                    \"haystack_ngram\": {\"type\": \"nGram\", \"min_gram\": 3, \"max_gram\": 15},\n                    \"haystack_edgengram\": {\n                        \"type\": \"edgeNGram\",\n                        \"min_gram\": 2,\n                        \"max_gram\": 15,\n                    },\n                },\n            }\n        }\n    }\n\n    def __init__(self, connection_alias, **connection_options):\n        super().__init__(connection_alias, **connection_options)\n\n        if \"URL\" not in connection_options:\n            raise ImproperlyConfigured(\n                \"You must specify a 'URL' in your settings for connection '%s'.\"\n                % connection_alias\n            )\n\n        if \"INDEX_NAME\" not in connection_options:\n            raise ImproperlyConfigured(\n                \"You must specify a 'INDEX_NAME' in your settings for connection '%s'.\"\n                % connection_alias\n            )\n\n        self.conn = elasticsearch.Elasticsearch(\n            connection_options[\"URL\"],\n            timeout=self.timeout,\n            **connection_options.get(\"KWARGS\", {})\n        )\n        self.index_name = connection_options[\"INDEX_NAME\"]\n        self.log = logging.getLogger(\"haystack\")\n        self.setup_complete = False\n        self.existing_mapping = {}\n\n    def setup(self):\n        \"\"\"\n        Defers loading until needed.\n        \"\"\"\n        # Get the existing mapping & cache it. 
We'll compare it\n        # during the ``update`` & if it doesn't match, we'll put the new\n        # mapping.\n        try:\n            self.existing_mapping = self.conn.indices.get_mapping(index=self.index_name)\n        except NotFoundError:\n            pass\n        except Exception:\n            if not self.silently_fail:\n                raise\n\n        unified_index = haystack.connections[self.connection_alias].get_unified_index()\n        self.content_field_name, field_mapping = self.build_schema(\n            unified_index.all_searchfields()\n        )\n        current_mapping = {\"modelresult\": {\"properties\": field_mapping}}\n\n        if current_mapping != self.existing_mapping:\n            try:\n                # Make sure the index is there first.\n                self.conn.indices.create(\n                    index=self.index_name, body=self.DEFAULT_SETTINGS, ignore=400\n                )\n                self.conn.indices.put_mapping(\n                    index=self.index_name, doc_type=\"modelresult\", body=current_mapping\n                )\n                self.existing_mapping = current_mapping\n            except Exception:\n                if not self.silently_fail:\n                    raise\n\n        self.setup_complete = True\n\n    def update(self, index, iterable, commit=True):\n        if not self.setup_complete:\n            try:\n                self.setup()\n            except elasticsearch.TransportError as e:\n                if not self.silently_fail:\n                    raise\n\n                self.log.error(\n                    \"Failed to add documents to Elasticsearch: %s\", e, exc_info=True\n                )\n                return\n\n        prepped_docs = []\n\n        for obj in iterable:\n            try:\n                prepped_data = index.full_prepare(obj)\n                final_data = {}\n\n                # Convert the data to make sure it's happy.\n                for key, value in prepped_data.items():\n 
                   final_data[key] = self._from_python(value)\n                final_data[\"_id\"] = final_data[ID]\n\n                prepped_docs.append(final_data)\n            except SkipDocument:\n                self.log.debug(\"Indexing for object `%s` skipped\", obj)\n            except elasticsearch.TransportError as e:\n                if not self.silently_fail:\n                    raise\n\n                # We'll log the object identifier but won't include the actual object\n                # to avoid the possibility of that generating encoding errors while\n                # processing the log message:\n                self.log.error(\n                    \"%s while preparing object for update\" % e.__class__.__name__,\n                    exc_info=True,\n                    extra={\"data\": {\"index\": index, \"object\": get_identifier(obj)}},\n                )\n\n        bulk(self.conn, prepped_docs, index=self.index_name, doc_type=\"modelresult\")\n\n        if commit:\n            self.conn.indices.refresh(index=self.index_name)\n\n    def remove(self, obj_or_string, commit=True):\n        doc_id = get_identifier(obj_or_string)\n\n        if not self.setup_complete:\n            try:\n                self.setup()\n            except elasticsearch.TransportError as e:\n                if not self.silently_fail:\n                    raise\n\n                self.log.error(\n                    \"Failed to remove document '%s' from Elasticsearch: %s\",\n                    doc_id,\n                    e,\n                    exc_info=True,\n                )\n                return\n\n        try:\n            self.conn.delete(\n                index=self.index_name, doc_type=\"modelresult\", id=doc_id, ignore=404\n            )\n\n            if commit:\n                self.conn.indices.refresh(index=self.index_name)\n        except elasticsearch.TransportError as e:\n            if not self.silently_fail:\n                raise\n\n            
self.log.error(\n                \"Failed to remove document '%s' from Elasticsearch: %s\",\n                doc_id,\n                e,\n                exc_info=True,\n            )\n\n    def clear(self, models=None, commit=True):\n        # We actually don't want to do this here, as mappings could be\n        # very different.\n        # if not self.setup_complete:\n        #     self.setup()\n\n        if models is not None:\n            assert isinstance(models, (list, tuple))\n\n        try:\n            if models is None:\n                self.conn.indices.delete(index=self.index_name, ignore=404)\n                self.setup_complete = False\n                self.existing_mapping = {}\n            else:\n                models_to_delete = []\n\n                for model in models:\n                    models_to_delete.append(\"%s:%s\" % (DJANGO_CT, get_model_ct(model)))\n\n                # Delete by query in Elasticsearch asssumes you're dealing with\n                # a ``query`` root object. 
:/\n                query = {\n                    \"query\": {\"query_string\": {\"query\": \" OR \".join(models_to_delete)}}\n                }\n                self.conn.delete_by_query(\n                    index=self.index_name, doc_type=\"modelresult\", body=query\n                )\n        except elasticsearch.TransportError as e:\n            if not self.silently_fail:\n                raise\n\n            if models is not None:\n                self.log.error(\n                    \"Failed to clear Elasticsearch index of models '%s': %s\",\n                    \",\".join(models_to_delete),\n                    e,\n                    exc_info=True,\n                )\n            else:\n                self.log.error(\n                    \"Failed to clear Elasticsearch index: %s\", e, exc_info=True\n                )\n\n    def build_search_kwargs(\n        self,\n        query_string,\n        sort_by=None,\n        start_offset=0,\n        end_offset=None,\n        fields=\"\",\n        highlight=False,\n        facets=None,\n        date_facets=None,\n        query_facets=None,\n        narrow_queries=None,\n        spelling_query=None,\n        within=None,\n        dwithin=None,\n        distance_point=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **extra_kwargs\n    ):\n        index = haystack.connections[self.connection_alias].get_unified_index()\n        content_field = index.document_field\n\n        if query_string == \"*:*\":\n            kwargs = {\"query\": {\"match_all\": {}}}\n        else:\n            kwargs = {\n                \"query\": {\n                    \"query_string\": {\n                        \"default_field\": content_field,\n                        \"default_operator\": DEFAULT_OPERATOR,\n                        \"query\": query_string,\n                        \"analyze_wildcard\": True,\n                        \"auto_generate_phrase_queries\": True,\n        
                \"fuzzy_min_sim\": FUZZY_MIN_SIM,\n                        \"fuzzy_max_expansions\": FUZZY_MAX_EXPANSIONS,\n                    }\n                }\n            }\n\n        # so far, no filters\n        filters = []\n\n        if fields:\n            if isinstance(fields, (list, set)):\n                fields = \" \".join(fields)\n\n            kwargs[\"fields\"] = fields\n\n        if sort_by is not None:\n            order_list = []\n            for field, direction in sort_by:\n                if field == \"distance\" and distance_point:\n                    # Do the geo-enabled sort.\n                    lng, lat = distance_point[\"point\"].coords\n                    sort_kwargs = {\n                        \"_geo_distance\": {\n                            distance_point[\"field\"]: [lng, lat],\n                            \"order\": direction,\n                            \"unit\": \"km\",\n                        }\n                    }\n                else:\n                    if field == \"distance\":\n                        warnings.warn(\n                            \"In order to sort by distance, you must call the '.distance(...)' method.\"\n                        )\n\n                    # Regular sorting.\n                    sort_kwargs = {field: {\"order\": direction}}\n\n                order_list.append(sort_kwargs)\n\n            kwargs[\"sort\"] = order_list\n\n        # From/size offsets don't seem to work right in Elasticsearch's DSL. 
:/\n        # if start_offset is not None:\n        #     kwargs['from'] = start_offset\n\n        # if end_offset is not None:\n        #     kwargs['size'] = end_offset - start_offset\n\n        if highlight:\n            # `highlight` can either be True or a dictionary containing custom parameters\n            # which will be passed to the backend and may override our default settings:\n\n            kwargs[\"highlight\"] = {\"fields\": {content_field: {\"store\": \"yes\"}}}\n\n            if isinstance(highlight, dict):\n                kwargs[\"highlight\"].update(highlight)\n\n        if self.include_spelling:\n            kwargs[\"suggest\"] = {\n                \"suggest\": {\n                    \"text\": spelling_query or query_string,\n                    \"term\": {\n                        # Using content_field here will result in suggestions of stemmed words.\n                        \"field\": \"_all\"\n                    },\n                }\n            }\n\n        if narrow_queries is None:\n            narrow_queries = set()\n\n        if facets is not None:\n            kwargs.setdefault(\"facets\", {})\n\n            for facet_fieldname, extra_options in facets.items():\n                facet_options = {\"terms\": {\"field\": facet_fieldname, \"size\": 100}}\n                # Special cases for options applied at the facet level (not the terms level).\n                if extra_options.pop(\"global_scope\", False):\n                    # Renamed \"global_scope\" since \"global\" is a python keyword.\n                    facet_options[\"global\"] = True\n                if \"facet_filter\" in extra_options:\n                    facet_options[\"facet_filter\"] = extra_options.pop(\"facet_filter\")\n                facet_options[\"terms\"].update(extra_options)\n                kwargs[\"facets\"][facet_fieldname] = facet_options\n\n        if date_facets is not None:\n            kwargs.setdefault(\"facets\", {})\n\n            for 
facet_fieldname, value in date_facets.items():\n                # Need to detect on gap_by & only add amount if it's more than one.\n                interval = value.get(\"gap_by\").lower()\n\n                # Need to detect on amount (can't be applied on months or years).\n                if value.get(\"gap_amount\", 1) != 1 and interval not in (\n                    \"month\",\n                    \"year\",\n                ):\n                    # Just the first character is valid for use.\n                    interval = \"%s%s\" % (value[\"gap_amount\"], interval[:1])\n\n                kwargs[\"facets\"][facet_fieldname] = {\n                    \"date_histogram\": {\"field\": facet_fieldname, \"interval\": interval},\n                    \"facet_filter\": {\n                        \"range\": {\n                            facet_fieldname: {\n                                \"from\": self._from_python(value.get(\"start_date\")),\n                                \"to\": self._from_python(value.get(\"end_date\")),\n                            }\n                        }\n                    },\n                }\n\n        if query_facets is not None:\n            kwargs.setdefault(\"facets\", {})\n\n            for facet_fieldname, value in query_facets:\n                kwargs[\"facets\"][facet_fieldname] = {\n                    \"query\": {\"query_string\": {\"query\": value}}\n                }\n\n        if limit_to_registered_models is None:\n            limit_to_registered_models = getattr(\n                settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n            )\n\n        if models and len(models):\n            model_choices = sorted(get_model_ct(model) for model in models)\n        elif limit_to_registered_models:\n            # Using narrow queries, limit the results to only models handled\n            # with the current routers.\n            model_choices = self.build_models_list()\n        else:\n            model_choices = []\n\n 
       if len(model_choices) > 0:\n            filters.append({\"terms\": {DJANGO_CT: model_choices}})\n\n        for q in narrow_queries:\n            filters.append(\n                {\"fquery\": {\"query\": {\"query_string\": {\"query\": q}}, \"_cache\": True}}\n            )\n\n        if within is not None:\n            from haystack.utils.geo import generate_bounding_box\n\n            ((south, west), (north, east)) = generate_bounding_box(\n                within[\"point_1\"], within[\"point_2\"]\n            )\n            within_filter = {\n                \"geo_bounding_box\": {\n                    within[\"field\"]: {\n                        \"top_left\": {\"lat\": north, \"lon\": west},\n                        \"bottom_right\": {\"lat\": south, \"lon\": east},\n                    }\n                }\n            }\n            filters.append(within_filter)\n\n        if dwithin is not None:\n            lng, lat = dwithin[\"point\"].coords\n\n            # NB: the 1.0.0 release of elasticsearch introduce an\n            #     incompatible change on the distance filter formating\n            if elasticsearch.VERSION >= (1, 0, 0):\n                distance = \"%(dist).6f%(unit)s\" % {\n                    \"dist\": dwithin[\"distance\"].km,\n                    \"unit\": \"km\",\n                }\n            else:\n                distance = dwithin[\"distance\"].km\n\n            dwithin_filter = {\n                \"geo_distance\": {\n                    \"distance\": distance,\n                    dwithin[\"field\"]: {\"lat\": lat, \"lon\": lng},\n                }\n            }\n            filters.append(dwithin_filter)\n\n        # if we want to filter, change the query type to filteres\n        if filters:\n            kwargs[\"query\"] = {\"filtered\": {\"query\": kwargs.pop(\"query\")}}\n            if len(filters) == 1:\n                kwargs[\"query\"][\"filtered\"][\"filter\"] = filters[0]\n            else:\n                
kwargs[\"query\"][\"filtered\"][\"filter\"] = {\"bool\": {\"must\": filters}}\n\n        if extra_kwargs:\n            kwargs.update(extra_kwargs)\n\n        return kwargs\n\n    @log_query\n    def search(self, query_string, **kwargs):\n        if len(query_string) == 0:\n            return {\"results\": [], \"hits\": 0}\n\n        if not self.setup_complete:\n            self.setup()\n\n        search_kwargs = self.build_search_kwargs(query_string, **kwargs)\n        search_kwargs[\"from\"] = kwargs.get(\"start_offset\", 0)\n\n        order_fields = set()\n        for order in search_kwargs.get(\"sort\", []):\n            for key in order.keys():\n                order_fields.add(key)\n\n        geo_sort = \"_geo_distance\" in order_fields\n\n        end_offset = kwargs.get(\"end_offset\")\n        start_offset = kwargs.get(\"start_offset\", 0)\n        if end_offset is not None and end_offset > start_offset:\n            search_kwargs[\"size\"] = end_offset - start_offset\n\n        try:\n            raw_results = self.conn.search(\n                body=search_kwargs,\n                index=self.index_name,\n                doc_type=\"modelresult\",\n                _source=True,\n            )\n        except elasticsearch.TransportError as e:\n            if not self.silently_fail:\n                raise\n\n            self.log.error(\n                \"Failed to query Elasticsearch using '%s': %s\",\n                query_string,\n                e,\n                exc_info=True,\n            )\n            raw_results = {}\n\n        return self._process_results(\n            raw_results,\n            highlight=kwargs.get(\"highlight\"),\n            result_class=kwargs.get(\"result_class\", SearchResult),\n            distance_point=kwargs.get(\"distance_point\"),\n            geo_sort=geo_sort,\n        )\n\n    def more_like_this(\n        self,\n        model_instance,\n        additional_query_string=None,\n        start_offset=0,\n        
end_offset=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **kwargs\n    ):\n        from haystack import connections\n\n        if not self.setup_complete:\n            self.setup()\n\n        # Deferred models will have a different class (\"RealClass_Deferred_fieldname\")\n        # which won't be in our registry:\n        model_klass = model_instance._meta.concrete_model\n\n        index = (\n            connections[self.connection_alias]\n            .get_unified_index()\n            .get_index(model_klass)\n        )\n        field_name = index.get_content_field()\n        params = {}\n\n        if start_offset is not None:\n            params[\"search_from\"] = start_offset\n\n        if end_offset is not None:\n            params[\"search_size\"] = end_offset - start_offset\n\n        doc_id = get_identifier(model_instance)\n\n        try:\n            raw_results = self.conn.mlt(\n                index=self.index_name,\n                doc_type=\"modelresult\",\n                id=doc_id,\n                mlt_fields=[field_name],\n                **params\n            )\n        except elasticsearch.TransportError as e:\n            if not self.silently_fail:\n                raise\n\n            self.log.error(\n                \"Failed to fetch More Like This from Elasticsearch for document '%s': %s\",\n                doc_id,\n                e,\n                exc_info=True,\n            )\n            raw_results = {}\n\n        return self._process_results(raw_results, result_class=result_class)\n\n    def _process_results(\n        self,\n        raw_results,\n        highlight=False,\n        result_class=None,\n        distance_point=None,\n        geo_sort=False,\n    ):\n        from haystack import connections\n\n        results = []\n        hits = raw_results.get(\"hits\", {}).get(\"total\", 0)\n        facets = {}\n        spelling_suggestion = None\n\n        if result_class is 
None:\n            result_class = SearchResult\n\n        if self.include_spelling and \"suggest\" in raw_results:\n            raw_suggest = raw_results[\"suggest\"].get(\"suggest\")\n            if raw_suggest:\n                spelling_suggestion = \" \".join(\n                    [\n                        word[\"text\"]\n                        if len(word[\"options\"]) == 0\n                        else word[\"options\"][0][\"text\"]\n                        for word in raw_suggest\n                    ]\n                )\n\n        if \"facets\" in raw_results:\n            facets = {\"fields\": {}, \"dates\": {}, \"queries\": {}}\n\n            # ES can return negative timestamps for pre-1970 data. Handle it.\n            def from_timestamp(tm):\n                if tm >= 0:\n                    return datetime.utcfromtimestamp(tm)\n                else:\n                    return datetime(1970, 1, 1) + timedelta(seconds=tm)\n\n            for facet_fieldname, facet_info in raw_results[\"facets\"].items():\n                if facet_info.get(\"_type\", \"terms\") == \"terms\":\n                    facets[\"fields\"][facet_fieldname] = [\n                        (individual[\"term\"], individual[\"count\"])\n                        for individual in facet_info[\"terms\"]\n                    ]\n                elif facet_info.get(\"_type\", \"terms\") == \"date_histogram\":\n                    # Elasticsearch provides UTC timestamps with an extra three\n                    # decimals of precision, which datetime barfs on.\n                    facets[\"dates\"][facet_fieldname] = [\n                        (from_timestamp(individual[\"time\"] / 1000), individual[\"count\"])\n                        for individual in facet_info[\"entries\"]\n                    ]\n                elif facet_info.get(\"_type\", \"terms\") == \"query\":\n                    facets[\"queries\"][facet_fieldname] = facet_info[\"count\"]\n\n        unified_index = 
connections[self.connection_alias].get_unified_index()\n        indexed_models = unified_index.get_indexed_models()\n        content_field = unified_index.document_field\n\n        for raw_result in raw_results.get(\"hits\", {}).get(\"hits\", []):\n            source = raw_result[\"_source\"]\n            app_label, model_name = source[DJANGO_CT].split(\".\")\n            additional_fields = {}\n            model = haystack_get_model(app_label, model_name)\n\n            if model and model in indexed_models:\n                index = source and unified_index.get_index(model)\n                for key, value in source.items():\n                    string_key = str(key)\n\n                    if string_key in index.fields and hasattr(\n                        index.fields[string_key], \"convert\"\n                    ):\n                        additional_fields[string_key] = index.fields[\n                            string_key\n                        ].convert(value)\n                    else:\n                        additional_fields[string_key] = self._to_python(value)\n\n                del additional_fields[DJANGO_CT]\n                del additional_fields[DJANGO_ID]\n\n                if \"highlight\" in raw_result:\n                    additional_fields[\"highlighted\"] = raw_result[\"highlight\"].get(\n                        content_field, \"\"\n                    )\n\n                if distance_point:\n                    additional_fields[\"_point_of_origin\"] = distance_point\n\n                    if geo_sort and raw_result.get(\"sort\"):\n                        from django.contrib.gis.measure import Distance\n\n                        additional_fields[\"_distance\"] = Distance(\n                            km=float(raw_result[\"sort\"][0])\n                        )\n                    else:\n                        additional_fields[\"_distance\"] = None\n\n                result = result_class(\n                    app_label,\n                   
 model_name,\n                    source[DJANGO_ID],\n                    raw_result[\"_score\"],\n                    **additional_fields\n                )\n                results.append(result)\n            else:\n                hits -= 1\n\n        return {\n            \"results\": results,\n            \"hits\": hits,\n            \"facets\": facets,\n            \"spelling_suggestion\": spelling_suggestion,\n        }\n\n    def build_schema(self, fields):\n        content_field_name = \"\"\n        mapping = {\n            DJANGO_CT: {\n                \"type\": \"string\",\n                \"index\": \"not_analyzed\",\n                \"include_in_all\": False,\n            },\n            DJANGO_ID: {\n                \"type\": \"string\",\n                \"index\": \"not_analyzed\",\n                \"include_in_all\": False,\n            },\n        }\n\n        for _, field_class in fields.items():\n            field_mapping = FIELD_MAPPINGS.get(\n                field_class.field_type, DEFAULT_FIELD_MAPPING\n            ).copy()\n            if field_class.boost != 1.0:\n                field_mapping[\"boost\"] = field_class.boost\n\n            if field_class.document is True:\n                content_field_name = field_class.index_fieldname\n\n            # Do this last to override `text` fields.\n            if field_mapping[\"type\"] == \"string\":\n                if field_class.indexed is False or hasattr(field_class, \"facet_for\"):\n                    field_mapping[\"index\"] = \"not_analyzed\"\n                    del field_mapping[\"analyzer\"]\n\n            mapping[field_class.index_fieldname] = field_mapping\n\n        return (content_field_name, mapping)\n\n    def _iso_datetime(self, value):\n        \"\"\"\n        If value appears to be something datetime-like, return it in ISO format.\n\n        Otherwise, return None.\n        \"\"\"\n        if hasattr(value, \"strftime\"):\n            if hasattr(value, \"hour\"):\n            
    return value.isoformat()\n            else:\n                return \"%sT00:00:00\" % value.isoformat()\n\n    def _from_python(self, value):\n        \"\"\"Convert more Python data types to ES-understandable JSON.\"\"\"\n        iso = self._iso_datetime(value)\n        if iso:\n            return iso\n        elif isinstance(value, bytes):\n            # TODO: Be stricter.\n            return str(value, errors=\"replace\")\n        elif isinstance(value, set):\n            return list(value)\n        return value\n\n    def _to_python(self, value):\n        \"\"\"Convert values from ElasticSearch to native Python values.\"\"\"\n        if isinstance(value, (int, float, complex, list, tuple, bool)):\n            return value\n\n        if isinstance(value, str):\n            possible_datetime = DATETIME_REGEX.search(value)\n\n            if possible_datetime:\n                date_values = possible_datetime.groupdict()\n\n                for dk, dv in date_values.items():\n                    date_values[dk] = int(dv)\n\n                return datetime(\n                    date_values[\"year\"],\n                    date_values[\"month\"],\n                    date_values[\"day\"],\n                    date_values[\"hour\"],\n                    date_values[\"minute\"],\n                    date_values[\"second\"],\n                )\n\n        try:\n            # This is slightly gross but it's hard to tell otherwise what the\n            # string's original type might have been. 
Be careful who you trust.\n            converted_value = eval(value)\n\n            # Try to handle most built-in types.\n            if isinstance(\n                converted_value, (int, list, tuple, set, dict, float, complex)\n            ):\n                return converted_value\n        except Exception:\n            # If it fails (SyntaxError or its ilk) or we don't trust it,\n            # continue on.\n            pass\n\n        return value\n\n\n# DRL_FIXME: Perhaps move to something where, if none of these\n#            match, call a custom method on the form that returns, per-backend,\n#            the right type of storage?\nDEFAULT_FIELD_MAPPING = {\"type\": \"string\", \"analyzer\": \"snowball\"}\nFIELD_MAPPINGS = {\n    \"edge_ngram\": {\"type\": \"string\", \"analyzer\": \"edgengram_analyzer\"},\n    \"ngram\": {\"type\": \"string\", \"analyzer\": \"ngram_analyzer\"},\n    \"date\": {\"type\": \"date\"},\n    \"datetime\": {\"type\": \"date\"},\n    \"location\": {\"type\": \"geo_point\"},\n    \"boolean\": {\"type\": \"boolean\"},\n    \"float\": {\"type\": \"float\"},\n    \"long\": {\"type\": \"long\"},\n    \"integer\": {\"type\": \"long\"},\n}\n\n\n# Sucks that this is almost an exact copy of what's in the Solr backend,\n# but we can't import due to dependencies.\nclass ElasticsearchSearchQuery(BaseSearchQuery):\n    def matching_all_fragment(self):\n        return \"*:*\"\n\n    def build_query_fragment(self, field, filter_type, value):\n        from haystack import connections\n\n        query_frag = \"\"\n\n        if not hasattr(value, \"input_type_name\"):\n            # Handle when we've got a ``ValuesListQuerySet``...\n            if hasattr(value, \"values_list\"):\n                value = list(value)\n\n            if isinstance(value, str):\n                # It's not an ``InputType``. 
Assume ``Clean``.\n                value = Clean(value)\n            else:\n                value = PythonData(value)\n\n        # Prepare the query using the InputType.\n        prepared_value = value.prepare(self)\n\n        if not isinstance(prepared_value, (set, list, tuple)):\n            # Then convert whatever we get back to what pysolr wants if needed.\n            prepared_value = self.backend._from_python(prepared_value)\n\n        # 'content' is a special reserved word, much like 'pk' in\n        # Django's ORM layer. It indicates 'no special field'.\n        if field == \"content\":\n            index_fieldname = \"\"\n        else:\n            index_fieldname = \"%s:\" % connections[\n                self._using\n            ].get_unified_index().get_index_fieldname(field)\n\n        filter_types = {\n            \"content\": \"%s\",\n            \"contains\": \"*%s*\",\n            \"endswith\": \"*%s\",\n            \"startswith\": \"%s*\",\n            \"exact\": \"%s\",\n            \"gt\": \"{%s TO *}\",\n            \"gte\": \"[%s TO *]\",\n            \"lt\": \"{* TO %s}\",\n            \"lte\": \"[* TO %s]\",\n            \"fuzzy\": \"%s~\",\n        }\n\n        if value.post_process is False:\n            query_frag = prepared_value\n        else:\n            if filter_type in [\n                \"content\",\n                \"contains\",\n                \"startswith\",\n                \"endswith\",\n                \"fuzzy\",\n            ]:\n                if value.input_type_name == \"exact\":\n                    query_frag = prepared_value\n                else:\n                    # Iterate over terms & incorportate the converted form of each into the query.\n                    terms = []\n\n                    if isinstance(prepared_value, str):\n                        for possible_value in prepared_value.split(\" \"):\n                            terms.append(\n                                filter_types[filter_type]\n        
                        % self.backend._from_python(possible_value)\n                            )\n                    else:\n                        terms.append(\n                            filter_types[filter_type]\n                            % self.backend._from_python(prepared_value)\n                        )\n\n                    if len(terms) == 1:\n                        query_frag = terms[0]\n                    else:\n                        query_frag = \"(%s)\" % \" AND \".join(terms)\n            elif filter_type == \"in\":\n                in_options = []\n\n                if not prepared_value:\n                    query_frag = \"(!*:*)\"\n                else:\n                    for possible_value in prepared_value:\n                        in_options.append(\n                            '\"%s\"' % self.backend._from_python(possible_value)\n                        )\n                    query_frag = \"(%s)\" % \" OR \".join(in_options)\n\n            elif filter_type == \"range\":\n                start = self.backend._from_python(prepared_value[0])\n                end = self.backend._from_python(prepared_value[1])\n                query_frag = '[\"%s\" TO \"%s\"]' % (start, end)\n            elif filter_type == \"exact\":\n                if value.input_type_name == \"exact\":\n                    query_frag = prepared_value\n                else:\n                    prepared_value = Exact(prepared_value).prepare(self)\n                    query_frag = filter_types[filter_type] % prepared_value\n            else:\n                if value.input_type_name != \"exact\":\n                    prepared_value = Exact(prepared_value).prepare(self)\n\n                query_frag = filter_types[filter_type] % prepared_value\n\n        if len(query_frag) and not isinstance(value, Raw):\n            if not query_frag.startswith(\"(\") and not query_frag.endswith(\")\"):\n                query_frag = \"(%s)\" % query_frag\n\n        return \"%s%s\" % 
(index_fieldname, query_frag)\n\n    def build_alt_parser_query(self, parser_name, query_string=\"\", **kwargs):\n        if query_string:\n            kwargs[\"v\"] = query_string\n\n        kwarg_bits = []\n\n        for key in sorted(kwargs.keys()):\n            if isinstance(kwargs[key], str) and \" \" in kwargs[key]:\n                kwarg_bits.append(\"%s='%s'\" % (key, kwargs[key]))\n            else:\n                kwarg_bits.append(\"%s=%s\" % (key, kwargs[key]))\n\n        return \"{!%s %s}\" % (parser_name, \" \".join(kwarg_bits))\n\n    def build_params(self, spelling_query=None, **kwargs):\n        search_kwargs = {\n            \"start_offset\": self.start_offset,\n            \"result_class\": self.result_class,\n        }\n        order_by_list = None\n\n        if self.order_by:\n            if order_by_list is None:\n                order_by_list = []\n\n            for field in self.order_by:\n                direction = \"asc\"\n                if field.startswith(\"-\"):\n                    direction = \"desc\"\n                    field = field[1:]\n                order_by_list.append((field, direction))\n\n            search_kwargs[\"sort_by\"] = order_by_list\n\n        if self.date_facets:\n            search_kwargs[\"date_facets\"] = self.date_facets\n\n        if self.distance_point:\n            search_kwargs[\"distance_point\"] = self.distance_point\n\n        if self.dwithin:\n            search_kwargs[\"dwithin\"] = self.dwithin\n\n        if self.end_offset is not None:\n            search_kwargs[\"end_offset\"] = self.end_offset\n\n        if self.facets:\n            search_kwargs[\"facets\"] = self.facets\n\n        if self.fields:\n            search_kwargs[\"fields\"] = self.fields\n\n        if self.highlight:\n            search_kwargs[\"highlight\"] = self.highlight\n\n        if self.models:\n            search_kwargs[\"models\"] = self.models\n\n        if self.narrow_queries:\n            
search_kwargs[\"narrow_queries\"] = self.narrow_queries\n\n        if self.query_facets:\n            search_kwargs[\"query_facets\"] = self.query_facets\n\n        if self.within:\n            search_kwargs[\"within\"] = self.within\n\n        if spelling_query:\n            search_kwargs[\"spelling_query\"] = spelling_query\n        elif self.spelling_query:\n            search_kwargs[\"spelling_query\"] = self.spelling_query\n\n        return search_kwargs\n\n    def run(self, spelling_query=None, **kwargs):\n        \"\"\"Builds and executes the query. Returns a list of search results.\"\"\"\n        final_query = self.build_query()\n        search_kwargs = self.build_params(spelling_query, **kwargs)\n\n        if kwargs:\n            search_kwargs.update(kwargs)\n\n        results = self.backend.search(final_query, **search_kwargs)\n        self._results = results.get(\"results\", [])\n        self._hit_count = results.get(\"hits\", 0)\n        self._facet_counts = self.post_process_facets(results)\n        self._spelling_suggestion = results.get(\"spelling_suggestion\", None)\n\n    def run_mlt(self, **kwargs):\n        \"\"\"Builds and executes the query. 
Returns a list of search results.\"\"\"\n        if self._more_like_this is False or self._mlt_instance is None:\n            raise MoreLikeThisError(\n                \"No instance was provided to determine 'More Like This' results.\"\n            )\n\n        additional_query_string = self.build_query()\n        search_kwargs = {\n            \"start_offset\": self.start_offset,\n            \"result_class\": self.result_class,\n            \"models\": self.models,\n        }\n\n        if self.end_offset is not None:\n            search_kwargs[\"end_offset\"] = self.end_offset - self.start_offset\n\n        results = self.backend.more_like_this(\n            self._mlt_instance, additional_query_string, **search_kwargs\n        )\n        self._results = results.get(\"results\", [])\n        self._hit_count = results.get(\"hits\", 0)\n\n\nclass ElasticsearchSearchEngine(BaseEngine):\n    backend = ElasticsearchSearchBackend\n    query = ElasticsearchSearchQuery\n"
  },
  {
    "path": "haystack/backends/simple_backend.py",
    "content": "\"\"\"\nA very basic, ORM-based backend for simple search during tests.\n\"\"\"\nfrom functools import reduce\nfrom warnings import warn\n\nfrom django.db.models import Q\n\nfrom haystack import connections\nfrom haystack.backends import (\n    BaseEngine,\n    BaseSearchBackend,\n    BaseSearchQuery,\n    SearchNode,\n    log_query,\n)\nfrom haystack.inputs import PythonData\nfrom haystack.models import SearchResult\nfrom haystack.utils import get_model_ct_tuple\n\n\nclass SimpleSearchBackend(BaseSearchBackend):\n    def update(self, indexer, iterable, commit=True):\n        warn(\"update is not implemented in this backend\")\n\n    def remove(self, obj, commit=True):\n        warn(\"remove is not implemented in this backend\")\n\n    def clear(self, models=None, commit=True):\n        warn(\"clear is not implemented in this backend\")\n\n    @log_query\n    def search(self, query_string, **kwargs):\n        hits = 0\n        results = []\n        result_class = SearchResult\n        models = (\n            connections[self.connection_alias].get_unified_index().get_indexed_models()\n        )\n\n        if kwargs.get(\"result_class\"):\n            result_class = kwargs[\"result_class\"]\n\n        if kwargs.get(\"models\"):\n            models = kwargs[\"models\"]\n\n        if query_string:\n            for model in models:\n                if query_string == \"*\":\n                    qs = model.objects.all()\n                else:\n                    for term in query_string.split():\n                        queries = []\n\n                        for field in model._meta.fields:\n                            if hasattr(field, \"related\"):\n                                continue\n\n                            if not field.get_internal_type() in (\n                                \"TextField\",\n                                \"CharField\",\n                                \"SlugField\",\n                            ):\n                     
           continue\n\n                            queries.append(Q(**{\"%s__icontains\" % field.name: term}))\n\n                        if queries:\n                            qs = model.objects.filter(\n                                reduce(lambda x, y: x | y, queries)\n                            )\n                        else:\n                            qs = []\n\n                hits += len(qs)\n\n                for match in qs:\n                    match.__dict__.pop(\"score\", None)\n                    app_label, model_name = get_model_ct_tuple(match)\n                    result = result_class(\n                        app_label, model_name, match.pk, 0, **match.__dict__\n                    )\n                    # For efficiency.\n                    result._model = match.__class__\n                    result._object = match\n                    results.append(result)\n\n        return {\"results\": results, \"hits\": hits}\n\n    def prep_value(self, db_field, value):\n        return value\n\n    def more_like_this(\n        self,\n        model_instance,\n        additional_query_string=None,\n        start_offset=0,\n        end_offset=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **kwargs\n    ):\n        return {\"results\": [], \"hits\": 0}\n\n\nclass SimpleSearchQuery(BaseSearchQuery):\n    def build_query(self):\n        if not self.query_filter:\n            return \"*\"\n\n        return self._build_sub_query(self.query_filter)\n\n    def _build_sub_query(self, search_node):\n        term_list = []\n\n        for child in search_node.children:\n            if isinstance(child, SearchNode):\n                term_list.append(self._build_sub_query(child))\n            else:\n                value = child[1]\n\n                if not hasattr(value, \"input_type_name\"):\n                    value = PythonData(value)\n\n                term_list.append(value.prepare(self))\n\n        return (\" 
\").join(map(str, term_list))\n\n\nclass SimpleEngine(BaseEngine):\n    backend = SimpleSearchBackend\n    query = SimpleSearchQuery\n"
  },
  {
    "path": "haystack/backends/solr_backend.py",
    "content": "import warnings\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\n\nimport haystack\nfrom haystack.backends import (\n    BaseEngine,\n    BaseSearchBackend,\n    BaseSearchQuery,\n    EmptyResults,\n    log_query,\n)\nfrom haystack.constants import DJANGO_CT, DJANGO_ID, ID\nfrom haystack.exceptions import MissingDependency, MoreLikeThisError, SkipDocument\nfrom haystack.inputs import Clean, Exact, PythonData, Raw\nfrom haystack.models import SearchResult\nfrom haystack.utils import get_identifier, get_model_ct\nfrom haystack.utils import log as logging\nfrom haystack.utils.app_loading import haystack_get_model\n\ntry:\n    from pysolr import Solr, SolrError\nexcept ImportError:\n    raise MissingDependency(\n        \"The 'solr' backend requires the installation of 'pysolr'. Please refer to the documentation.\"\n    )\n\n\nclass SolrSearchBackend(BaseSearchBackend):\n    # Word reserved by Solr for special use.\n    RESERVED_WORDS = (\"AND\", \"NOT\", \"OR\", \"TO\")\n\n    # Characters reserved by Solr for special use.\n    # The '\\\\' must come first, so as not to overwrite the other slash replacements.\n    RESERVED_CHARACTERS = (\n        \"\\\\\",\n        \"+\",\n        \"-\",\n        \"&&\",\n        \"||\",\n        \"!\",\n        \"(\",\n        \")\",\n        \"{\",\n        \"}\",\n        \"[\",\n        \"]\",\n        \"^\",\n        '\"',\n        \"~\",\n        \"*\",\n        \"?\",\n        \":\",\n        \"/\",\n    )\n\n    def __init__(self, connection_alias, **connection_options):\n        super().__init__(connection_alias, **connection_options)\n\n        if \"URL\" not in connection_options:\n            raise ImproperlyConfigured(\n                \"You must specify a 'URL' in your settings for connection '%s'.\"\n                % connection_alias\n            )\n\n        self.collate = connection_options.get(\"COLLATE_SPELLING\", True)\n\n        self.conn = Solr(\n      
      connection_options[\"URL\"],\n            timeout=self.timeout,\n            **connection_options.get(\"KWARGS\", {})\n        )\n        self.log = logging.getLogger(\"haystack\")\n\n    def update(self, index, iterable, commit=True):\n        docs = []\n\n        for obj in iterable:\n            try:\n                docs.append(index.full_prepare(obj))\n            except SkipDocument:\n                self.log.debug(\"Indexing for object `%s` skipped\", obj)\n            except UnicodeDecodeError:\n                if not self.silently_fail:\n                    raise\n\n                # We'll log the object identifier but won't include the actual object\n                # to avoid the possibility of that generating encoding errors while\n                # processing the log message:\n                self.log.error(\n                    \"UnicodeDecodeError while preparing object for update\",\n                    exc_info=True,\n                    extra={\"data\": {\"index\": index, \"object\": get_identifier(obj)}},\n                )\n\n        if len(docs) > 0:\n            try:\n                self.conn.add(docs, commit=commit, boost=index.get_field_weights())\n            except (IOError, SolrError) as e:\n                if not self.silently_fail:\n                    raise\n\n                self.log.error(\"Failed to add documents to Solr: %s\", e, exc_info=True)\n\n    def remove(self, obj_or_string, commit=True):\n        solr_id = get_identifier(obj_or_string)\n\n        try:\n            kwargs = {\"commit\": commit, \"id\": solr_id}\n            self.conn.delete(**kwargs)\n        except (IOError, SolrError) as e:\n            if not self.silently_fail:\n                raise\n\n            self.log.error(\n                \"Failed to remove document '%s' from Solr: %s\",\n                solr_id,\n                e,\n                exc_info=True,\n            )\n\n    def clear(self, models=None, commit=True):\n        if models is not 
None:\n            assert isinstance(models, (list, tuple))\n\n        try:\n            if models is None:\n                # *:* matches all docs in Solr\n                self.conn.delete(q=\"*:*\", commit=commit)\n            else:\n                models_to_delete = []\n\n                for model in models:\n                    models_to_delete.append(\"%s:%s\" % (DJANGO_CT, get_model_ct(model)))\n\n                self.conn.delete(q=\" OR \".join(models_to_delete), commit=commit)\n\n            if commit:\n                # Run an optimize post-clear. http://wiki.apache.org/solr/FAQ#head-9aafb5d8dff5308e8ea4fcf4b71f19f029c4bb99\n                self.conn.optimize()\n        except (IOError, SolrError) as e:\n            if not self.silently_fail:\n                raise\n\n            if models is not None:\n                self.log.error(\n                    \"Failed to clear Solr index of models '%s': %s\",\n                    \",\".join(models_to_delete),\n                    e,\n                    exc_info=True,\n                )\n            else:\n                self.log.error(\"Failed to clear Solr index: %s\", e, exc_info=True)\n\n    @log_query\n    def search(self, query_string, **kwargs):\n        if len(query_string) == 0:\n            return {\"results\": [], \"hits\": 0}\n\n        search_kwargs = self.build_search_kwargs(query_string, **kwargs)\n\n        try:\n            raw_results = self.conn.search(query_string, **search_kwargs)\n        except (IOError, SolrError) as e:\n            if not self.silently_fail:\n                raise\n\n            self.log.error(\n                \"Failed to query Solr using '%s': %s\", query_string, e, exc_info=True\n            )\n            raw_results = EmptyResults()\n\n        return self._process_results(\n            raw_results,\n            highlight=kwargs.get(\"highlight\"),\n            result_class=kwargs.get(\"result_class\", SearchResult),\n            
distance_point=kwargs.get(\"distance_point\"),\n        )\n\n    def build_search_kwargs(\n        self,\n        query_string,\n        sort_by=None,\n        start_offset=0,\n        end_offset=None,\n        fields=\"\",\n        highlight=False,\n        facets=None,\n        date_facets=None,\n        query_facets=None,\n        narrow_queries=None,\n        spelling_query=None,\n        within=None,\n        dwithin=None,\n        distance_point=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        stats=None,\n        collate=None,\n        **extra_kwargs\n    ):\n\n        index = haystack.connections[self.connection_alias].get_unified_index()\n\n        kwargs = {\"fl\": \"* score\", \"df\": index.document_field}\n\n        if fields:\n            if isinstance(fields, (list, set)):\n                fields = \" \".join(fields)\n\n            kwargs[\"fl\"] = fields\n\n        if sort_by is not None:\n            if sort_by in [\"distance asc\", \"distance desc\"] and distance_point:\n                # Do the geo-enabled sort.\n                lng, lat = distance_point[\"point\"].coords\n                kwargs[\"sfield\"] = distance_point[\"field\"]\n                kwargs[\"pt\"] = \"%s,%s\" % (lat, lng)\n\n                if sort_by == \"distance asc\":\n                    kwargs[\"sort\"] = \"geodist() asc\"\n                else:\n                    kwargs[\"sort\"] = \"geodist() desc\"\n            else:\n                if sort_by.startswith(\"distance \"):\n                    warnings.warn(\n                        \"In order to sort by distance, you must call the '.distance(...)' method.\"\n                    )\n\n                # Regular sorting.\n                kwargs[\"sort\"] = sort_by\n\n        if start_offset is not None:\n            kwargs[\"start\"] = start_offset\n\n        if end_offset is not None:\n            kwargs[\"rows\"] = end_offset - start_offset\n\n        if 
highlight:\n            # `highlight` can either be True or a dictionary containing custom parameters\n            # which will be passed to the backend and may override our default settings:\n\n            kwargs[\"hl\"] = \"true\"\n            kwargs[\"hl.fragsize\"] = \"200\"\n\n            if isinstance(highlight, dict):\n                # autoprefix highlighter options with 'hl.', all of them start with it anyway\n                # this makes option dicts shorter: {'maxAnalyzedChars': 42}\n                # and lets some of options be used as keyword arguments: `.highlight(preserveMulti=False)`\n                kwargs.update(\n                    {\n                        key if key.startswith(\"hl.\") else (\"hl.\" + key): highlight[key]\n                        for key in highlight.keys()\n                    }\n                )\n\n        if collate is None:\n            collate = self.collate\n        if self.include_spelling is True:\n            kwargs[\"spellcheck\"] = \"true\"\n            kwargs[\"spellcheck.collate\"] = str(collate).lower()\n            kwargs[\"spellcheck.count\"] = 1\n\n            if spelling_query:\n                kwargs[\"spellcheck.q\"] = spelling_query\n\n        if facets is not None:\n            kwargs[\"facet\"] = \"on\"\n            kwargs[\"facet.field\"] = facets.keys()\n\n            for facet_field, options in facets.items():\n                for key, value in options.items():\n                    kwargs[\n                        \"f.%s.facet.%s\" % (facet_field, key)\n                    ] = self.conn._from_python(value)\n\n        if date_facets is not None:\n            kwargs[\"facet\"] = \"on\"\n            kwargs[\"facet.date\"] = date_facets.keys()\n            kwargs[\"facet.date.other\"] = \"none\"\n\n            for key, value in date_facets.items():\n                kwargs[\"f.%s.facet.date.start\" % key] = self.conn._from_python(\n                    value.get(\"start_date\")\n                )\n        
        kwargs[\"f.%s.facet.date.end\" % key] = self.conn._from_python(\n                    value.get(\"end_date\")\n                )\n                gap_by_string = value.get(\"gap_by\").upper()\n                gap_string = \"%d%s\" % (value.get(\"gap_amount\"), gap_by_string)\n\n                if value.get(\"gap_amount\") != 1:\n                    gap_string += \"S\"\n\n                kwargs[\"f.%s.facet.date.gap\" % key] = \"+%s/%s\" % (\n                    gap_string,\n                    gap_by_string,\n                )\n\n        if query_facets is not None:\n            kwargs[\"facet\"] = \"on\"\n            kwargs[\"facet.query\"] = [\n                \"%s:%s\" % (field, value) for field, value in query_facets\n            ]\n\n        if limit_to_registered_models is None:\n            limit_to_registered_models = getattr(\n                settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n            )\n\n        if models and len(models):\n            model_choices = sorted(get_model_ct(model) for model in models)\n        elif limit_to_registered_models:\n            # Using narrow queries, limit the results to only models handled\n            # with the current routers.\n            model_choices = self.build_models_list()\n        else:\n            model_choices = []\n\n        if len(model_choices) > 0:\n            if narrow_queries is None:\n                narrow_queries = set()\n\n            narrow_queries.add(\"%s:(%s)\" % (DJANGO_CT, \" OR \".join(model_choices)))\n\n        if narrow_queries is not None:\n            kwargs[\"fq\"] = list(narrow_queries)\n\n        if stats:\n            kwargs[\"stats\"] = \"true\"\n\n            for k in stats.keys():\n                kwargs[\"stats.field\"] = k\n\n                for facet in stats[k]:\n                    kwargs[\"f.%s.stats.facet\" % k] = facet\n\n        if within is not None:\n            from haystack.utils.geo import generate_bounding_box\n\n            
kwargs.setdefault(\"fq\", [])\n            ((min_lat, min_lng), (max_lat, max_lng)) = generate_bounding_box(\n                within[\"point_1\"], within[\"point_2\"]\n            )\n            # Bounding boxes are min, min TO max, max. Solr's wiki was *NOT*\n            # very clear on this.\n            bbox = \"%s:[%s,%s TO %s,%s]\" % (\n                within[\"field\"],\n                min_lat,\n                min_lng,\n                max_lat,\n                max_lng,\n            )\n            kwargs[\"fq\"].append(bbox)\n\n        if dwithin is not None:\n            kwargs.setdefault(\"fq\", [])\n            lng, lat = dwithin[\"point\"].coords\n            geofilt = \"{!geofilt pt=%s,%s sfield=%s d=%s}\" % (\n                lat,\n                lng,\n                dwithin[\"field\"],\n                dwithin[\"distance\"].km,\n            )\n            kwargs[\"fq\"].append(geofilt)\n\n        # Check to see if the backend should try to include distances\n        # (Solr 4.X+) in the results.\n        if self.distance_available and distance_point:\n            # In early testing, you can't just hand Solr 4.X a proper bounding box\n            # & request distances. To enable native distance would take calculating\n            # a center point & a radius off the user-provided box, which kinda\n            # sucks. 
We'll avoid it for now, since Solr 4.x's release will be some\n            # time yet.\n            # kwargs['fl'] += ' _dist_:geodist()'\n            pass\n\n        if extra_kwargs:\n            kwargs.update(extra_kwargs)\n\n        return kwargs\n\n    def more_like_this(\n        self,\n        model_instance,\n        additional_query_string=None,\n        start_offset=0,\n        end_offset=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **kwargs\n    ):\n        from haystack import connections\n\n        # Deferred models will have a different class (\"RealClass_Deferred_fieldname\")\n        # which won't be in our registry:\n        model_klass = model_instance._meta.concrete_model\n\n        index = (\n            connections[self.connection_alias]\n            .get_unified_index()\n            .get_index(model_klass)\n        )\n        field_name = index.get_content_field()\n        params = {\"fl\": \"*,score\"}\n\n        if start_offset is not None:\n            params[\"start\"] = start_offset\n\n        if end_offset is not None:\n            params[\"rows\"] = end_offset\n\n        narrow_queries = set()\n\n        if limit_to_registered_models is None:\n            limit_to_registered_models = getattr(\n                settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n            )\n\n        if models and len(models):\n            model_choices = sorted(get_model_ct(model) for model in models)\n        elif limit_to_registered_models:\n            # Using narrow queries, limit the results to only models handled\n            # with the current routers.\n            model_choices = self.build_models_list()\n        else:\n            model_choices = []\n\n        if len(model_choices) > 0:\n            if narrow_queries is None:\n                narrow_queries = set()\n\n            narrow_queries.add(\"%s:(%s)\" % (DJANGO_CT, \" OR \".join(model_choices)))\n\n        if 
additional_query_string:\n            narrow_queries.add(additional_query_string)\n\n        if narrow_queries:\n            params[\"fq\"] = list(narrow_queries)\n\n        query = \"%s:%s\" % (ID, get_identifier(model_instance))\n\n        try:\n            raw_results = self.conn.more_like_this(query, field_name, **params)\n        except (IOError, SolrError) as e:\n            if not self.silently_fail:\n                raise\n\n            self.log.error(\n                \"Failed to fetch More Like This from Solr for document '%s': %s\",\n                query,\n                e,\n                exc_info=True,\n            )\n            raw_results = EmptyResults()\n\n        return self._process_results(raw_results, result_class=result_class)\n\n    def _process_results(\n        self, raw_results, highlight=False, result_class=None, distance_point=None\n    ):\n        from haystack import connections\n\n        results = []\n        hits = raw_results.hits\n        facets = {}\n        stats = {}\n        spelling_suggestion = spelling_suggestions = None\n\n        if result_class is None:\n            result_class = SearchResult\n\n        if hasattr(raw_results, \"stats\"):\n            stats = raw_results.stats.get(\"stats_fields\", {})\n\n        if hasattr(raw_results, \"facets\"):\n            facets = {\n                \"fields\": raw_results.facets.get(\"facet_fields\", {}),\n                \"dates\": raw_results.facets.get(\"facet_dates\", {}),\n                \"queries\": raw_results.facets.get(\"facet_queries\", {}),\n            }\n\n            for key in [\"fields\"]:\n                for facet_field in facets[key]:\n                    # Convert to a two-tuple, as Solr's json format returns a list of\n                    # pairs.\n                    facets[key][facet_field] = list(\n                        zip(\n                            facets[key][facet_field][::2],\n                            facets[key][facet_field][1::2],\n    
                    )\n                    )\n\n        if self.include_spelling and hasattr(raw_results, \"spellcheck\"):\n            try:\n                spelling_suggestions = self.extract_spelling_suggestions(raw_results)\n            except Exception as exc:\n                self.log.error(\n                    \"Error extracting spelling suggestions: %s\",\n                    exc,\n                    exc_info=True,\n                    extra={\"data\": {\"spellcheck\": raw_results.spellcheck}},\n                )\n\n                if not self.silently_fail:\n                    raise\n\n                spelling_suggestions = None\n\n            if spelling_suggestions:\n                # Maintain compatibility with older versions of Haystack which returned a single suggestion:\n                spelling_suggestion = spelling_suggestions[-1]\n                assert isinstance(spelling_suggestion, str)\n            else:\n                spelling_suggestion = None\n\n        unified_index = connections[self.connection_alias].get_unified_index()\n        indexed_models = unified_index.get_indexed_models()\n\n        for raw_result in raw_results.docs:\n            app_label, model_name = raw_result[DJANGO_CT].split(\".\")\n            additional_fields = {}\n            model = haystack_get_model(app_label, model_name)\n\n            if model and model in indexed_models:\n                index = unified_index.get_index(model)\n                index_field_map = index.field_map\n                for key, value in raw_result.items():\n                    string_key = str(key)\n                    # re-map key if alternate name used\n                    if string_key in index_field_map:\n                        string_key = index_field_map[key]\n\n                    if string_key in index.fields and hasattr(\n                        index.fields[string_key], \"convert\"\n                    ):\n                        additional_fields[string_key] = 
index.fields[\n                            string_key\n                        ].convert(value)\n                    else:\n                        additional_fields[string_key] = self.conn._to_python(value)\n\n                del additional_fields[DJANGO_CT]\n                del additional_fields[DJANGO_ID]\n                del additional_fields[\"score\"]\n\n                if raw_result[ID] in getattr(raw_results, \"highlighting\", {}):\n                    additional_fields[\"highlighted\"] = raw_results.highlighting[\n                        raw_result[ID]\n                    ]\n\n                if distance_point:\n                    additional_fields[\"_point_of_origin\"] = distance_point\n\n                    if raw_result.get(\"__dist__\"):\n                        from django.contrib.gis.measure import Distance\n\n                        additional_fields[\"_distance\"] = Distance(\n                            km=float(raw_result[\"__dist__\"])\n                        )\n                    else:\n                        additional_fields[\"_distance\"] = None\n\n                result = result_class(\n                    app_label,\n                    model_name,\n                    raw_result[DJANGO_ID],\n                    raw_result[\"score\"],\n                    **additional_fields\n                )\n                results.append(result)\n            else:\n                hits -= 1\n\n        return {\n            \"results\": results,\n            \"hits\": hits,\n            \"stats\": stats,\n            \"facets\": facets,\n            \"spelling_suggestion\": spelling_suggestion,\n            \"spelling_suggestions\": spelling_suggestions,\n        }\n\n    def extract_spelling_suggestions(self, raw_results):\n        # There are many different formats for Legacy, 6.4, and 6.5 e.g.\n        # https://issues.apache.org/jira/browse/SOLR-3029 and depending on the\n        # version and configuration the response format may be a dict of 
dicts,\n        # a list of dicts, or a list of strings.\n\n        collations = raw_results.spellcheck.get(\"collations\", None)\n        suggestions = raw_results.spellcheck.get(\"suggestions\", None)\n\n        # We'll collect multiple suggestions here. For backwards\n        # compatibility with older versions of Haystack we'll still return\n        # only a single suggestion but in the future we can expose all of\n        # them.\n\n        spelling_suggestions = []\n\n        if collations:\n            if isinstance(collations, dict):\n                # Solr 6.5\n                collation_values = collations[\"collation\"]\n                if isinstance(collation_values, str):\n                    collation_values = [collation_values]\n                elif isinstance(collation_values, dict):\n                    # spellcheck.collateExtendedResults changes the format to a dictionary:\n                    collation_values = [collation_values[\"collationQuery\"]]\n            elif isinstance(collations[1], dict):\n                # Solr 6.4\n                collation_values = collations\n            else:\n                # Older versions of Solr\n                collation_values = collations[-1:]\n\n            for i in collation_values:\n                # Depending on the options the values are either simple strings or dictionaries:\n                spelling_suggestions.append(\n                    i[\"collationQuery\"] if isinstance(i, dict) else i\n                )\n        elif suggestions:\n            if isinstance(suggestions, dict):\n                for i in suggestions.values():\n                    for j in i[\"suggestion\"]:\n                        if isinstance(j, dict):\n                            spelling_suggestions.append(j[\"word\"])\n                        else:\n                            spelling_suggestions.append(j)\n            elif isinstance(suggestions[0], str) and isinstance(suggestions[1], dict):\n                # Solr 6.4 
uses a list of paired (word, dictionary) pairs:\n                for suggestion in suggestions:\n                    if isinstance(suggestion, dict):\n                        for i in suggestion[\"suggestion\"]:\n                            if isinstance(i, dict):\n                                spelling_suggestions.append(i[\"word\"])\n                            else:\n                                spelling_suggestions.append(i)\n            else:\n                # Legacy Solr\n                spelling_suggestions.append(suggestions[-1])\n\n        return spelling_suggestions\n\n    def build_schema(self, fields):\n        content_field_name = \"\"\n        schema_fields = []\n\n        for _, field_class in fields.items():\n            field_data = {\n                \"field_name\": field_class.index_fieldname,\n                \"type\": \"text_en\",\n                \"indexed\": \"true\",\n                \"stored\": \"true\",\n                \"multi_valued\": \"false\",\n            }\n\n            if field_class.document is True:\n                content_field_name = field_class.index_fieldname\n\n            # DRL_FIXME: Perhaps move to something where, if none of these\n            #            checks succeed, call a custom method on the form that\n            #            returns, per-backend, the right type of storage?\n            if field_class.field_type in [\"date\", \"datetime\"]:\n                field_data[\"type\"] = \"date\"\n            elif field_class.field_type == \"integer\":\n                field_data[\"type\"] = \"long\"\n            elif field_class.field_type == \"float\":\n                field_data[\"type\"] = \"float\"\n            elif field_class.field_type == \"boolean\":\n                field_data[\"type\"] = \"boolean\"\n            elif field_class.field_type == \"ngram\":\n                field_data[\"type\"] = \"ngram\"\n            elif field_class.field_type == \"edge_ngram\":\n                field_data[\"type\"] = 
\"edge_ngram\"\n            elif field_class.field_type == \"location\":\n                field_data[\"type\"] = \"location\"\n\n            if field_class.is_multivalued:\n                field_data[\"multi_valued\"] = \"true\"\n\n            if field_class.stored is False:\n                field_data[\"stored\"] = \"false\"\n\n            # Do this last to override `text` fields.\n            if field_class.indexed is False:\n                field_data[\"indexed\"] = \"false\"\n\n                # If it's text and not being indexed, we probably don't want\n                # to do the normal lowercase/tokenize/stemming/etc. dance.\n                if field_data[\"type\"] == \"text_en\":\n                    field_data[\"type\"] = \"string\"\n\n            # If it's a ``FacetField``, make sure we don't postprocess it.\n            if hasattr(field_class, \"facet_for\"):\n                # If it's text, it ought to be a string.\n                if field_data[\"type\"] == \"text_en\":\n                    field_data[\"type\"] = \"string\"\n\n            schema_fields.append(field_data)\n\n        return (content_field_name, schema_fields)\n\n    def extract_file_contents(self, file_obj, **kwargs):\n        \"\"\"Extract text and metadata from a structured file (PDF, MS Word, etc.)\n\n        Uses the Solr ExtractingRequestHandler, which is based on Apache Tika.\n        See the Solr wiki for details:\n\n            http://wiki.apache.org/solr/ExtractingRequestHandler\n\n        Due to the way the ExtractingRequestHandler is implemented it completely\n        replaces the normal Haystack indexing process with several unfortunate\n        restrictions: only one file per request, the extracted data is added to\n        the index with no ability to modify it, etc. 
To simplify the process and\n        allow for more advanced use we'll run using the extract-only mode to\n        return the extracted data without adding it to the index so we can then\n        use it within Haystack's normal templating process.\n\n        Returns None if metadata cannot be extracted; otherwise returns a\n        dictionary containing at least two keys:\n\n            :contents:\n                        Extracted full-text content, if applicable\n            :metadata:\n                        key:value pairs of text strings\n        \"\"\"\n\n        try:\n            return self.conn.extract(file_obj, **kwargs)\n        except Exception as e:\n            self.log.warning(\n                \"Unable to extract file contents: %s\",\n                e,\n                exc_info=True,\n                extra={\"data\": {\"file\": file_obj}},\n            )\n            return None\n\n\nclass SolrSearchQuery(BaseSearchQuery):\n    def matching_all_fragment(self):\n        return \"*:*\"\n\n    def build_query_fragment(self, field, filter_type, value):\n        from haystack import connections\n\n        query_frag = \"\"\n\n        if not hasattr(value, \"input_type_name\"):\n            # Handle when we've got a ``ValuesListQuerySet``...\n            if hasattr(value, \"values_list\"):\n                value = list(value)\n\n            if isinstance(value, str):\n                # It's not an ``InputType``. Assume ``Clean``.\n                value = Clean(value)\n            else:\n                value = PythonData(value)\n\n        # Prepare the query using the InputType.\n        prepared_value = value.prepare(self)\n\n        if not isinstance(prepared_value, (set, list, tuple)):\n            # Then convert whatever we get back to what pysolr wants if needed.\n            prepared_value = self.backend.conn._from_python(prepared_value)\n\n        # 'content' is a special reserved word, much like 'pk' in\n        # Django's ORM layer. 
It indicates 'no special field'.\n        if field == \"content\":\n            index_fieldname = \"\"\n        else:\n            index_fieldname = \"%s:\" % connections[\n                self._using\n            ].get_unified_index().get_index_fieldname(field)\n\n        filter_types = {\n            \"content\": \"%s\",\n            \"contains\": \"*%s*\",\n            \"endswith\": \"*%s\",\n            \"startswith\": \"%s*\",\n            \"exact\": \"%s\",\n            \"gt\": \"{%s TO *}\",\n            \"gte\": \"[%s TO *]\",\n            \"lt\": \"{* TO %s}\",\n            \"lte\": \"[* TO %s]\",\n            \"fuzzy\": \"%s~\",\n        }\n\n        if value.post_process is False:\n            query_frag = prepared_value\n        else:\n            if filter_type in [\n                \"content\",\n                \"contains\",\n                \"startswith\",\n                \"endswith\",\n                \"fuzzy\",\n            ]:\n                if value.input_type_name == \"exact\":\n                    query_frag = prepared_value\n                else:\n                    # Iterate over terms & incorportate the converted form of each into the query.\n                    terms = []\n\n                    for possible_value in prepared_value.split(\" \"):\n                        terms.append(\n                            filter_types[filter_type]\n                            % self.backend.conn._from_python(possible_value)\n                        )\n\n                    if len(terms) == 1:\n                        query_frag = terms[0]\n                    else:\n                        query_frag = \"(%s)\" % \" AND \".join(terms)\n            elif filter_type == \"in\":\n                in_options = []\n\n                if not prepared_value:\n                    query_frag = \"(!*:*)\"\n                else:\n                    for possible_value in prepared_value:\n                        in_options.append(\n                            
'\"%s\"' % self.backend.conn._from_python(possible_value)\n                        )\n\n                    query_frag = \"(%s)\" % \" OR \".join(in_options)\n            elif filter_type == \"range\":\n                start = self.backend.conn._from_python(prepared_value[0])\n                end = self.backend.conn._from_python(prepared_value[1])\n                query_frag = '[\"%s\" TO \"%s\"]' % (start, end)\n            elif filter_type == \"exact\":\n                if value.input_type_name == \"exact\":\n                    query_frag = prepared_value\n                else:\n                    prepared_value = Exact(prepared_value).prepare(self)\n                    query_frag = filter_types[filter_type] % prepared_value\n            else:\n                if value.input_type_name != \"exact\":\n                    prepared_value = Exact(prepared_value).prepare(self)\n\n                query_frag = filter_types[filter_type] % prepared_value\n\n        if len(query_frag) and not isinstance(value, Raw):\n            if not query_frag.startswith(\"(\") and not query_frag.endswith(\")\"):\n                query_frag = \"(%s)\" % query_frag\n\n        return \"%s%s\" % (index_fieldname, query_frag)\n\n    def build_alt_parser_query(self, parser_name, query_string=\"\", **kwargs):\n        if query_string:\n            query_string = Clean(query_string).prepare(self)\n\n        kwarg_bits = []\n\n        for key in sorted(kwargs.keys()):\n            if isinstance(kwargs[key], str) and \" \" in kwargs[key]:\n                kwarg_bits.append(\"%s='%s'\" % (key, kwargs[key]))\n            else:\n                kwarg_bits.append(\"%s=%s\" % (key, kwargs[key]))\n\n        return '_query_:\"{!%s %s}%s\"' % (\n            parser_name,\n            Clean(\" \".join(kwarg_bits)),\n            query_string,\n        )\n\n    def build_params(self, spelling_query=None, **kwargs):\n        search_kwargs = {\n            \"start_offset\": self.start_offset,\n            
\"result_class\": self.result_class,\n        }\n        order_by_list = None\n\n        if self.order_by:\n            if order_by_list is None:\n                order_by_list = []\n\n            for order_by in self.order_by:\n                if order_by.startswith(\"-\"):\n                    order_by_list.append(\"%s desc\" % order_by[1:])\n                else:\n                    order_by_list.append(\"%s asc\" % order_by)\n\n            search_kwargs[\"sort_by\"] = \", \".join(order_by_list)\n\n        if self.date_facets:\n            search_kwargs[\"date_facets\"] = self.date_facets\n\n        if self.distance_point:\n            search_kwargs[\"distance_point\"] = self.distance_point\n\n        if self.dwithin:\n            search_kwargs[\"dwithin\"] = self.dwithin\n\n        if self.end_offset is not None:\n            search_kwargs[\"end_offset\"] = self.end_offset\n\n        if self.facets:\n            search_kwargs[\"facets\"] = self.facets\n\n        if self.fields:\n            search_kwargs[\"fields\"] = self.fields\n\n        if self.highlight:\n            search_kwargs[\"highlight\"] = self.highlight\n\n        if self.models:\n            search_kwargs[\"models\"] = self.models\n\n        if self.narrow_queries:\n            search_kwargs[\"narrow_queries\"] = self.narrow_queries\n\n        if self.query_facets:\n            search_kwargs[\"query_facets\"] = self.query_facets\n\n        if self.within:\n            search_kwargs[\"within\"] = self.within\n\n        if spelling_query:\n            search_kwargs[\"spelling_query\"] = spelling_query\n        elif self.spelling_query:\n            search_kwargs[\"spelling_query\"] = self.spelling_query\n\n        if self.stats:\n            search_kwargs[\"stats\"] = self.stats\n\n        return search_kwargs\n\n    def run(self, spelling_query=None, **kwargs):\n        \"\"\"Builds and executes the query. 
Returns a list of search results.\"\"\"\n        final_query = self.build_query()\n        search_kwargs = self.build_params(spelling_query, **kwargs)\n\n        if kwargs:\n            search_kwargs.update(kwargs)\n\n        results = self.backend.search(final_query, **search_kwargs)\n\n        self._results = results.get(\"results\", [])\n        self._hit_count = results.get(\"hits\", 0)\n        self._facet_counts = self.post_process_facets(results)\n        self._stats = results.get(\"stats\", {})\n        self._spelling_suggestion = results.get(\"spelling_suggestion\", None)\n\n    def run_mlt(self, **kwargs):\n        \"\"\"Builds and executes the query. Returns a list of search results.\"\"\"\n        if self._more_like_this is False or self._mlt_instance is None:\n            raise MoreLikeThisError(\n                \"No instance was provided to determine 'More Like This' results.\"\n            )\n\n        additional_query_string = self.build_query()\n        search_kwargs = {\n            \"start_offset\": self.start_offset,\n            \"result_class\": self.result_class,\n            \"models\": self.models,\n        }\n\n        if self.end_offset is not None:\n            search_kwargs[\"end_offset\"] = self.end_offset - self.start_offset\n\n        results = self.backend.more_like_this(\n            self._mlt_instance, additional_query_string, **search_kwargs\n        )\n        self._results = results.get(\"results\", [])\n        self._hit_count = results.get(\"hits\", 0)\n\n\nclass SolrEngine(BaseEngine):\n    backend = SolrSearchBackend\n    query = SolrSearchQuery\n"
  },
  {
    "path": "haystack/backends/whoosh_backend.py",
    "content": "import json\nimport os\nimport re\nimport shutil\nimport threading\nimport warnings\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.datetime_safe import datetime\nfrom django.utils.encoding import force_str\n\nfrom haystack.backends import (\n    BaseEngine,\n    BaseSearchBackend,\n    BaseSearchQuery,\n    EmptyResults,\n    log_query,\n)\nfrom haystack.constants import (\n    DJANGO_CT,\n    DJANGO_ID,\n    FUZZY_WHOOSH_MAX_EDITS,\n    FUZZY_WHOOSH_MIN_PREFIX,\n    ID,\n)\nfrom haystack.exceptions import MissingDependency, SearchBackendError, SkipDocument\nfrom haystack.inputs import Clean, Exact, PythonData, Raw\nfrom haystack.models import SearchResult\nfrom haystack.utils import get_identifier, get_model_ct\nfrom haystack.utils import log as logging\nfrom haystack.utils.app_loading import haystack_get_model\n\ntry:\n    import whoosh\nexcept ImportError:\n    raise MissingDependency(\n        \"The 'whoosh' backend requires the installation of 'Whoosh'. 
Please refer to the documentation.\"\n    )\n\n# Handle minimum requirement.\nif not hasattr(whoosh, \"__version__\") or whoosh.__version__ < (2, 5, 0):\n    raise MissingDependency(\"The 'whoosh' backend requires version 2.5.0 or greater.\")\n\n# Bubble up the correct error.\nfrom whoosh import index\nfrom whoosh.analysis import StemmingAnalyzer\nfrom whoosh.fields import BOOLEAN, DATETIME\nfrom whoosh.fields import ID as WHOOSH_ID\nfrom whoosh.fields import IDLIST, KEYWORD, NGRAM, NGRAMWORDS, NUMERIC, TEXT, Schema\nfrom whoosh.filedb.filestore import FileStorage, RamStorage\nfrom whoosh.highlight import ContextFragmenter, HtmlFormatter\nfrom whoosh.highlight import highlight as whoosh_highlight\nfrom whoosh.qparser import FuzzyTermPlugin, QueryParser\nfrom whoosh.searching import ResultsPage\nfrom whoosh.writing import AsyncWriter\n\nDATETIME_REGEX = re.compile(\n    r\"^(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})T(?P<hour>\\d{2}):(?P<minute>\\d{2}):(?P<second>\\d{2})(\\.\\d{3,6}Z?)?$\"\n)\nLOCALS = threading.local()\nLOCALS.RAM_STORE = None\n\n\nclass WhooshHtmlFormatter(HtmlFormatter):\n    \"\"\"\n    This is a HtmlFormatter simpler than the whoosh.HtmlFormatter.\n    We use it to have consistent results across backends. 
Specifically,\n    Solr, Xapian and Elasticsearch are using this formatting.\n    \"\"\"\n\n    template = \"<%(tag)s>%(t)s</%(tag)s>\"\n\n\nclass WhooshSearchBackend(BaseSearchBackend):\n    # Word reserved by Whoosh for special use.\n    RESERVED_WORDS = (\"AND\", \"NOT\", \"OR\", \"TO\")\n\n    # Characters reserved by Whoosh for special use.\n    # The '\\\\' must come first, so as not to overwrite the other slash replacements.\n    RESERVED_CHARACTERS = (\n        \"\\\\\",\n        \"+\",\n        \"-\",\n        \"&&\",\n        \"||\",\n        \"!\",\n        \"(\",\n        \")\",\n        \"{\",\n        \"}\",\n        \"[\",\n        \"]\",\n        \"^\",\n        '\"',\n        \"~\",\n        \"*\",\n        \"?\",\n        \":\",\n        \".\",\n    )\n\n    def __init__(self, connection_alias, **connection_options):\n        super().__init__(connection_alias, **connection_options)\n        self.setup_complete = False\n        self.use_file_storage = True\n        self.post_limit = getattr(connection_options, \"POST_LIMIT\", 128 * 1024 * 1024)\n        self.path = connection_options.get(\"PATH\")\n\n        if connection_options.get(\"STORAGE\", \"file\") != \"file\":\n            self.use_file_storage = False\n\n        if self.use_file_storage and not self.path:\n            raise ImproperlyConfigured(\n                \"You must specify a 'PATH' in your settings for connection '%s'.\"\n                % connection_alias\n            )\n\n        self.log = logging.getLogger(\"haystack\")\n\n    def setup(self):\n        \"\"\"\n        Defers loading until needed.\n        \"\"\"\n        from haystack import connections\n\n        new_index = False\n\n        # Make sure the index is there.\n        if self.use_file_storage and not os.path.exists(self.path):\n            os.makedirs(self.path)\n            new_index = True\n\n        if self.use_file_storage and not os.access(self.path, os.W_OK):\n            raise IOError(\n                
\"The path to your Whoosh index '%s' is not writable for the current user/group.\"\n                % self.path\n            )\n\n        if self.use_file_storage:\n            self.storage = FileStorage(self.path)\n        else:\n            global LOCALS\n\n            if getattr(LOCALS, \"RAM_STORE\", None) is None:\n                LOCALS.RAM_STORE = RamStorage()\n\n            self.storage = LOCALS.RAM_STORE\n\n        self.content_field_name, self.schema = self.build_schema(\n            connections[self.connection_alias].get_unified_index().all_searchfields()\n        )\n        self.parser = QueryParser(self.content_field_name, schema=self.schema)\n        self.parser.add_plugins([FuzzyTermPlugin])\n\n        if new_index is True:\n            self.index = self.storage.create_index(self.schema)\n        else:\n            try:\n                self.index = self.storage.open_index(schema=self.schema)\n            except index.EmptyIndexError:\n                self.index = self.storage.create_index(self.schema)\n\n        self.setup_complete = True\n\n    def build_schema(self, fields):\n        schema_fields = {\n            ID: WHOOSH_ID(stored=True, unique=True),\n            DJANGO_CT: WHOOSH_ID(stored=True),\n            DJANGO_ID: WHOOSH_ID(stored=True),\n        }\n        # Grab the number of keys that are hard-coded into Haystack.\n        # We'll use this to (possibly) fail slightly more gracefully later.\n        initial_key_count = len(schema_fields)\n        content_field_name = \"\"\n\n        for _, field_class in fields.items():\n            if field_class.is_multivalued:\n                if field_class.indexed is False:\n                    schema_fields[field_class.index_fieldname] = IDLIST(\n                        stored=True, field_boost=field_class.boost\n                    )\n                else:\n                    schema_fields[field_class.index_fieldname] = KEYWORD(\n                        stored=True,\n                        
commas=True,\n                        scorable=True,\n                        field_boost=field_class.boost,\n                    )\n            elif field_class.field_type in [\"date\", \"datetime\"]:\n                schema_fields[field_class.index_fieldname] = DATETIME(\n                    stored=field_class.stored, sortable=True\n                )\n            elif field_class.field_type == \"integer\":\n                schema_fields[field_class.index_fieldname] = NUMERIC(\n                    stored=field_class.stored,\n                    numtype=int,\n                    field_boost=field_class.boost,\n                )\n            elif field_class.field_type == \"float\":\n                schema_fields[field_class.index_fieldname] = NUMERIC(\n                    stored=field_class.stored,\n                    numtype=float,\n                    field_boost=field_class.boost,\n                )\n            elif field_class.field_type == \"boolean\":\n                # Field boost isn't supported on BOOLEAN as of 1.8.2.\n                schema_fields[field_class.index_fieldname] = BOOLEAN(\n                    stored=field_class.stored\n                )\n            elif field_class.field_type == \"ngram\":\n                schema_fields[field_class.index_fieldname] = NGRAM(\n                    minsize=3,\n                    maxsize=15,\n                    stored=field_class.stored,\n                    field_boost=field_class.boost,\n                )\n            elif field_class.field_type == \"edge_ngram\":\n                schema_fields[field_class.index_fieldname] = NGRAMWORDS(\n                    minsize=2,\n                    maxsize=15,\n                    at=\"start\",\n                    stored=field_class.stored,\n                    field_boost=field_class.boost,\n                )\n            else:\n                schema_fields[field_class.index_fieldname] = TEXT(\n                    stored=True,\n                    
analyzer=field_class.analyzer,\n                    field_boost=field_class.boost,\n                    sortable=True,\n                )\n\n            if field_class.document is True:\n                content_field_name = field_class.index_fieldname\n                schema_fields[field_class.index_fieldname].spelling = True\n\n        # Fail more gracefully than relying on the backend to die if no fields\n        # are found.\n        if len(schema_fields) <= initial_key_count:\n            raise SearchBackendError(\n                \"No fields were found in any search_indexes. Please correct this before attempting to search.\"\n            )\n\n        return (content_field_name, Schema(**schema_fields))\n\n    def update(self, index, iterable, commit=True):\n        if not self.setup_complete:\n            self.setup()\n\n        self.index = self.index.refresh()\n        writer = AsyncWriter(self.index)\n\n        for obj in iterable:\n            try:\n                doc = index.full_prepare(obj)\n            except SkipDocument:\n                self.log.debug(\"Indexing for object `%s` skipped\", obj)\n            else:\n                # Really make sure it's unicode, because Whoosh won't have it any\n                # other way.\n                for key in doc:\n                    doc[key] = self._from_python(doc[key])\n\n                # Document boosts aren't supported in Whoosh 2.5.0+.\n                if \"boost\" in doc:\n                    del doc[\"boost\"]\n\n                try:\n                    writer.update_document(**doc)\n                except Exception as e:\n                    if not self.silently_fail:\n                        raise\n\n                    # We'll log the object identifier but won't include the actual object\n                    # to avoid the possibility of that generating encoding errors while\n                    # processing the log message:\n                    self.log.error(\n                        \"%s 
while preparing object for update\" % e.__class__.__name__,\n                        exc_info=True,\n                        extra={\"data\": {\"index\": index, \"object\": get_identifier(obj)}},\n                    )\n\n        if len(iterable) > 0:\n            # For now, commit no matter what, as we run into locking issues otherwise.\n            writer.commit()\n\n    def remove(self, obj_or_string, commit=True):\n        if not self.setup_complete:\n            self.setup()\n\n        self.index = self.index.refresh()\n        whoosh_id = get_identifier(obj_or_string)\n\n        try:\n            self.index.delete_by_query(q=self.parser.parse('%s:\"%s\"' % (ID, whoosh_id)))\n        except Exception as e:\n            if not self.silently_fail:\n                raise\n\n            self.log.error(\n                \"Failed to remove document '%s' from Whoosh: %s\",\n                whoosh_id,\n                e,\n                exc_info=True,\n            )\n\n    def clear(self, models=None, commit=True):\n        if not self.setup_complete:\n            self.setup()\n\n        self.index = self.index.refresh()\n\n        if models is not None:\n            assert isinstance(models, (list, tuple))\n\n        try:\n            if models is None:\n                self.delete_index()\n            else:\n                models_to_delete = []\n\n                for model in models:\n                    models_to_delete.append(\"%s:%s\" % (DJANGO_CT, get_model_ct(model)))\n\n                self.index.delete_by_query(\n                    q=self.parser.parse(\" OR \".join(models_to_delete))\n                )\n        except Exception as e:\n            if not self.silently_fail:\n                raise\n\n            if models is not None:\n                self.log.error(\n                    \"Failed to clear Whoosh index of models '%s': %s\",\n                    \",\".join(models_to_delete),\n                    e,\n                    exc_info=True,\n         
       )\n            else:\n                self.log.error(\"Failed to clear Whoosh index: %s\", e, exc_info=True)\n\n    def delete_index(self):\n        # Per the Whoosh mailing list, if wiping out everything from the index,\n        # it's much more efficient to simply delete the index files.\n        if self.use_file_storage and os.path.exists(self.path):\n            shutil.rmtree(self.path)\n        elif not self.use_file_storage:\n            self.storage.clean()\n\n        # Recreate everything.\n        self.setup()\n\n    def optimize(self):\n        if not self.setup_complete:\n            self.setup()\n\n        self.index = self.index.refresh()\n        self.index.optimize()\n\n    def calculate_page(self, start_offset=0, end_offset=None):\n        # Prevent against Whoosh throwing an error. Requires an end_offset\n        # greater than 0.\n        if end_offset is not None and end_offset <= 0:\n            end_offset = 1\n\n        # Determine the page.\n        page_num = 0\n\n        if end_offset is None:\n            end_offset = 1000000\n\n        if start_offset is None:\n            start_offset = 0\n\n        page_length = end_offset - start_offset\n\n        if page_length and page_length > 0:\n            page_num = int(start_offset / page_length)\n\n        # Increment because Whoosh uses 1-based page numbers.\n        page_num += 1\n        return page_num, page_length\n\n    @log_query\n    def search(\n        self,\n        query_string,\n        sort_by=None,\n        start_offset=0,\n        end_offset=None,\n        fields=\"\",\n        highlight=False,\n        facets=None,\n        date_facets=None,\n        query_facets=None,\n        narrow_queries=None,\n        spelling_query=None,\n        within=None,\n        dwithin=None,\n        distance_point=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **kwargs\n    ):\n        if not self.setup_complete:\n            
self.setup()\n\n        # A zero length query should return no results.\n        if len(query_string) == 0:\n            return {\"results\": [], \"hits\": 0}\n\n        query_string = force_str(query_string)\n\n        # A one-character query (non-wildcard) gets nabbed by a stopwords\n        # filter and should yield zero results.\n        if len(query_string) <= 1 and query_string != \"*\":\n            return {\"results\": [], \"hits\": 0}\n\n        reverse = False\n\n        if sort_by is not None:\n            # Determine if we need to reverse the results and if Whoosh can\n            # handle what it's being asked to sort by. Reversing is an\n            # all-or-nothing action, unfortunately.\n            sort_by_list = []\n            reverse_counter = 0\n\n            for order_by in sort_by:\n                if order_by.startswith(\"-\"):\n                    reverse_counter += 1\n\n            if reverse_counter and reverse_counter != len(sort_by):\n                raise SearchBackendError(\n                    \"Whoosh requires all order_by fields\"\n                    \" to use the same sort direction\"\n                )\n\n            for order_by in sort_by:\n                if order_by.startswith(\"-\"):\n                    sort_by_list.append(order_by[1:])\n\n                    if len(sort_by_list) == 1:\n                        reverse = True\n                else:\n                    sort_by_list.append(order_by)\n\n                    if len(sort_by_list) == 1:\n                        reverse = False\n\n            sort_by = sort_by_list\n\n        if facets is not None:\n            warnings.warn(\"Whoosh does not handle faceting.\", Warning, stacklevel=2)\n\n        if date_facets is not None:\n            warnings.warn(\n                \"Whoosh does not handle date faceting.\", Warning, stacklevel=2\n            )\n\n        if query_facets is not None:\n            warnings.warn(\n                \"Whoosh does not handle query 
faceting.\", Warning, stacklevel=2\n            )\n\n        narrowed_results = None\n        self.index = self.index.refresh()\n\n        if limit_to_registered_models is None:\n            limit_to_registered_models = getattr(\n                settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n            )\n\n        if models and len(models):\n            model_choices = sorted(get_model_ct(model) for model in models)\n        elif limit_to_registered_models:\n            # Using narrow queries, limit the results to only models handled\n            # with the current routers.\n            model_choices = self.build_models_list()\n        else:\n            model_choices = []\n\n        if len(model_choices) > 0:\n            if narrow_queries is None:\n                narrow_queries = set()\n\n            narrow_queries.add(\n                \" OR \".join([\"%s:%s\" % (DJANGO_CT, rm) for rm in model_choices])\n            )\n\n        narrow_searcher = None\n\n        if narrow_queries is not None:\n            # Potentially expensive? 
I don't see another way to do it in Whoosh...\n            narrow_searcher = self.index.searcher()\n\n            for nq in narrow_queries:\n                recent_narrowed_results = narrow_searcher.search(\n                    self.parser.parse(force_str(nq)), limit=None\n                )\n\n                if len(recent_narrowed_results) <= 0:\n                    return {\"results\": [], \"hits\": 0}\n\n                if narrowed_results:\n                    narrowed_results.filter(recent_narrowed_results)\n                else:\n                    narrowed_results = recent_narrowed_results\n\n        self.index = self.index.refresh()\n\n        if self.index.doc_count():\n            searcher = self.index.searcher()\n            parsed_query = self.parser.parse(query_string)\n\n            # In the event of an invalid/stopworded query, recover gracefully.\n            if parsed_query is None:\n                return {\"results\": [], \"hits\": 0}\n\n            page_num, page_length = self.calculate_page(start_offset, end_offset)\n\n            search_kwargs = {\n                \"pagelen\": page_length,\n                \"sortedby\": sort_by,\n                \"reverse\": reverse,\n            }\n\n            # Handle the case where the results have been narrowed.\n            if narrowed_results is not None:\n                search_kwargs[\"filter\"] = narrowed_results\n\n            try:\n                raw_page = searcher.search_page(parsed_query, page_num, **search_kwargs)\n            except ValueError:\n                if not self.silently_fail:\n                    raise\n\n                return {\"results\": [], \"hits\": 0, \"spelling_suggestion\": None}\n\n            # Because as of Whoosh 2.5.1, it will return the wrong page of\n            # results if you request something too high. 
:(\n            if raw_page.pagenum < page_num:\n                return {\"results\": [], \"hits\": 0, \"spelling_suggestion\": None}\n\n            results = self._process_results(\n                raw_page,\n                highlight=highlight,\n                query_string=query_string,\n                spelling_query=spelling_query,\n                result_class=result_class,\n            )\n            searcher.close()\n\n            if hasattr(narrow_searcher, \"close\"):\n                narrow_searcher.close()\n\n            return results\n        else:\n            if self.include_spelling:\n                if spelling_query:\n                    spelling_suggestion = self.create_spelling_suggestion(\n                        spelling_query\n                    )\n                else:\n                    spelling_suggestion = self.create_spelling_suggestion(query_string)\n            else:\n                spelling_suggestion = None\n\n            return {\n                \"results\": [],\n                \"hits\": 0,\n                \"spelling_suggestion\": spelling_suggestion,\n            }\n\n    def more_like_this(\n        self,\n        model_instance,\n        additional_query_string=None,\n        start_offset=0,\n        end_offset=None,\n        models=None,\n        limit_to_registered_models=None,\n        result_class=None,\n        **kwargs\n    ):\n        if not self.setup_complete:\n            self.setup()\n\n        field_name = self.content_field_name\n        narrow_queries = set()\n        narrowed_results = None\n        self.index = self.index.refresh()\n\n        if limit_to_registered_models is None:\n            limit_to_registered_models = getattr(\n                settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n            )\n\n        if models and len(models):\n            model_choices = sorted(get_model_ct(model) for model in models)\n        elif limit_to_registered_models:\n            # Using narrow 
queries, limit the results to only models handled\n            # with the current routers.\n            model_choices = self.build_models_list()\n        else:\n            model_choices = []\n\n        if len(model_choices) > 0:\n            if narrow_queries is None:\n                narrow_queries = set()\n\n            narrow_queries.add(\n                \" OR \".join([\"%s:%s\" % (DJANGO_CT, rm) for rm in model_choices])\n            )\n\n        if additional_query_string and additional_query_string != \"*\":\n            narrow_queries.add(additional_query_string)\n\n        narrow_searcher = None\n\n        if narrow_queries is not None:\n            # Potentially expensive? I don't see another way to do it in Whoosh...\n            narrow_searcher = self.index.searcher()\n\n            for nq in narrow_queries:\n                recent_narrowed_results = narrow_searcher.search(\n                    self.parser.parse(force_str(nq)), limit=None\n                )\n\n                if len(recent_narrowed_results) <= 0:\n                    return {\"results\": [], \"hits\": 0}\n\n                if narrowed_results:\n                    narrowed_results.filter(recent_narrowed_results)\n                else:\n                    narrowed_results = recent_narrowed_results\n\n        page_num, page_length = self.calculate_page(start_offset, end_offset)\n\n        self.index = self.index.refresh()\n        raw_results = EmptyResults()\n\n        searcher = None\n        if self.index.doc_count():\n            query = \"%s:%s\" % (ID, get_identifier(model_instance))\n            searcher = self.index.searcher()\n            parsed_query = self.parser.parse(query)\n            results = searcher.search(parsed_query)\n\n            if len(results):\n                raw_results = results[0].more_like_this(field_name, top=end_offset)\n\n            # Handle the case where the results have been narrowed.\n            if narrowed_results is not None and 
hasattr(raw_results, \"filter\"):\n                raw_results.filter(narrowed_results)\n\n        try:\n            raw_page = ResultsPage(raw_results, page_num, page_length)\n        except ValueError:\n            if not self.silently_fail:\n                raise\n\n            return {\"results\": [], \"hits\": 0, \"spelling_suggestion\": None}\n\n        # Because as of Whoosh 2.5.1, it will return the wrong page of\n        # results if you request something too high. :(\n        if raw_page.pagenum < page_num:\n            return {\"results\": [], \"hits\": 0, \"spelling_suggestion\": None}\n\n        results = self._process_results(raw_page, result_class=result_class)\n\n        if searcher:\n            searcher.close()\n\n        if hasattr(narrow_searcher, \"close\"):\n            narrow_searcher.close()\n\n        return results\n\n    def _process_results(\n        self,\n        raw_page,\n        highlight=False,\n        query_string=\"\",\n        spelling_query=None,\n        result_class=None,\n    ):\n        from haystack import connections\n\n        results = []\n\n        # It's important to grab the hits first before slicing. 
Otherwise, this\n        # can cause pagination failures.\n        hits = len(raw_page)\n\n        if result_class is None:\n            result_class = SearchResult\n\n        facets = {}\n        spelling_suggestion = None\n        unified_index = connections[self.connection_alias].get_unified_index()\n        indexed_models = unified_index.get_indexed_models()\n\n        for doc_offset, raw_result in enumerate(raw_page):\n            score = raw_page.score(doc_offset) or 0\n            app_label, model_name = raw_result[DJANGO_CT].split(\".\")\n            additional_fields = {}\n            model = haystack_get_model(app_label, model_name)\n\n            if model and model in indexed_models:\n                for key, value in raw_result.items():\n                    index = unified_index.get_index(model)\n                    string_key = str(key)\n\n                    if string_key in index.fields and hasattr(\n                        index.fields[string_key], \"convert\"\n                    ):\n                        # Special-cased due to the nature of KEYWORD fields.\n                        if index.fields[string_key].is_multivalued:\n                            if value is None or len(value) == 0:\n                                additional_fields[string_key] = []\n                            else:\n                                additional_fields[string_key] = value.split(\",\")\n                        else:\n                            additional_fields[string_key] = index.fields[\n                                string_key\n                            ].convert(value)\n                    else:\n                        additional_fields[string_key] = self._to_python(value)\n\n                del additional_fields[DJANGO_CT]\n                del additional_fields[DJANGO_ID]\n\n                if highlight:\n                    sa = StemmingAnalyzer()\n                    formatter = WhooshHtmlFormatter(\"em\")\n                    terms = [token.text 
for token in sa(query_string)]\n\n                    whoosh_result = whoosh_highlight(\n                        additional_fields.get(self.content_field_name),\n                        terms,\n                        sa,\n                        ContextFragmenter(),\n                        formatter,\n                    )\n                    additional_fields[\"highlighted\"] = {\n                        self.content_field_name: [whoosh_result]\n                    }\n\n                result = result_class(\n                    app_label,\n                    model_name,\n                    raw_result[DJANGO_ID],\n                    score,\n                    **additional_fields\n                )\n                results.append(result)\n            else:\n                hits -= 1\n\n        if self.include_spelling:\n            if spelling_query:\n                spelling_suggestion = self.create_spelling_suggestion(spelling_query)\n            else:\n                spelling_suggestion = self.create_spelling_suggestion(query_string)\n\n        return {\n            \"results\": results,\n            \"hits\": hits,\n            \"facets\": facets,\n            \"spelling_suggestion\": spelling_suggestion,\n        }\n\n    def create_spelling_suggestion(self, query_string):\n        spelling_suggestion = None\n        reader = self.index.reader()\n        corrector = reader.corrector(self.content_field_name)\n        cleaned_query = force_str(query_string)\n\n        if not query_string:\n            return spelling_suggestion\n\n        # Clean the string.\n        for rev_word in self.RESERVED_WORDS:\n            cleaned_query = cleaned_query.replace(rev_word, \"\")\n\n        for rev_char in self.RESERVED_CHARACTERS:\n            cleaned_query = cleaned_query.replace(rev_char, \"\")\n\n        # Break it down.\n        query_words = cleaned_query.split()\n        suggested_words = []\n\n        for word in query_words:\n            suggestions = 
corrector.suggest(word, limit=1)\n\n            if len(suggestions) > 0:\n                suggested_words.append(suggestions[0])\n\n        spelling_suggestion = \" \".join(suggested_words)\n        return spelling_suggestion\n\n    def _from_python(self, value):\n        \"\"\"\n        Converts Python values to a string for Whoosh.\n\n        Code courtesy of pysolr.\n        \"\"\"\n        if hasattr(value, \"strftime\"):\n            if not hasattr(value, \"hour\"):\n                value = datetime(value.year, value.month, value.day, 0, 0, 0)\n        elif isinstance(value, bool):\n            if value:\n                value = \"true\"\n            else:\n                value = \"false\"\n        elif isinstance(value, (list, tuple)):\n            value = \",\".join([force_str(v) for v in value])\n        elif isinstance(value, (int, float)):\n            # Leave it alone.\n            pass\n        else:\n            value = force_str(value)\n        return value\n\n    def _to_python(self, value):\n        \"\"\"\n        Converts values from Whoosh to native Python values.\n\n        A port of the same method in pysolr, as they deal with data the same way.\n        \"\"\"\n        if value == \"true\":\n            return True\n        elif value == \"false\":\n            return False\n\n        if value and isinstance(value, str):\n            possible_datetime = DATETIME_REGEX.search(value)\n\n            if possible_datetime:\n                date_values = possible_datetime.groupdict()\n\n                for dk, dv in date_values.items():\n                    date_values[dk] = int(dv)\n\n                return datetime(\n                    date_values[\"year\"],\n                    date_values[\"month\"],\n                    date_values[\"day\"],\n                    date_values[\"hour\"],\n                    date_values[\"minute\"],\n                    date_values[\"second\"],\n                )\n\n        try:\n            # Attempt to use 
json to load the values.\n            converted_value = json.loads(value)\n\n            # Try to handle most built-in types.\n            if isinstance(\n                converted_value,\n                (list, tuple, set, dict, int, float, complex),\n            ):\n                return converted_value\n        except Exception:\n            # If it fails (SyntaxError or its ilk) or we don't trust it,\n            # continue on.\n            pass\n\n        return value\n\n\nclass WhooshSearchQuery(BaseSearchQuery):\n    def _convert_datetime(self, date):\n        if hasattr(date, \"hour\"):\n            return force_str(date.strftime(\"%Y%m%d%H%M%S\"))\n        else:\n            return force_str(date.strftime(\"%Y%m%d000000\"))\n\n    def clean(self, query_fragment):\n        \"\"\"\n        Provides a mechanism for sanitizing user input before presenting the\n        value to the backend.\n\n        Whoosh 1.X differs here in that you can no longer use a backslash\n        to escape reserved characters. 
Instead, the whole word should be\n        quoted.\n        \"\"\"\n        words = query_fragment.split()\n        cleaned_words = []\n\n        for word in words:\n            if word in self.backend.RESERVED_WORDS:\n                word = word.replace(word, word.lower())\n\n            for char in self.backend.RESERVED_CHARACTERS:\n                if char in word:\n                    word = \"'%s'\" % word\n                    break\n\n            cleaned_words.append(word)\n\n        return \" \".join(cleaned_words)\n\n    def build_query_fragment(self, field, filter_type, value):\n        from haystack import connections\n\n        query_frag = \"\"\n        is_datetime = False\n\n        if not hasattr(value, \"input_type_name\"):\n            # Handle when we've got a ``ValuesListQuerySet``...\n            if hasattr(value, \"values_list\"):\n                value = list(value)\n\n            if hasattr(value, \"strftime\"):\n                is_datetime = True\n\n            if isinstance(value, str) and value != \" \":\n                # It's not an ``InputType``. Assume ``Clean``.\n                value = Clean(value)\n            else:\n                value = PythonData(value)\n\n        # Prepare the query using the InputType.\n        prepared_value = value.prepare(self)\n\n        if not isinstance(prepared_value, (set, list, tuple)):\n            # Then convert whatever we get back to what pysolr wants if needed.\n            prepared_value = self.backend._from_python(prepared_value)\n\n        # 'content' is a special reserved word, much like 'pk' in\n        # Django's ORM layer. 
It indicates 'no special field'.\n        if field == \"content\":\n            index_fieldname = \"\"\n        else:\n            index_fieldname = \"%s:\" % connections[\n                self._using\n            ].get_unified_index().get_index_fieldname(field)\n\n        filter_types = {\n            \"content\": \"%s\",\n            \"contains\": \"*%s*\",\n            \"endswith\": \"*%s\",\n            \"startswith\": \"%s*\",\n            \"exact\": \"%s\",\n            \"gt\": \"{%s to}\",\n            \"gte\": \"[%s to]\",\n            \"lt\": \"{to %s}\",\n            \"lte\": \"[to %s]\",\n            \"fuzzy\": \"%s~{}/%d\".format(FUZZY_WHOOSH_MAX_EDITS),\n        }\n\n        if value.post_process is False:\n            query_frag = prepared_value\n        else:\n            if filter_type in [\n                \"content\",\n                \"contains\",\n                \"startswith\",\n                \"endswith\",\n                \"fuzzy\",\n            ]:\n                if value.input_type_name == \"exact\":\n                    query_frag = prepared_value\n                else:\n                    # Iterate over terms & incorportate the converted form of each into the query.\n                    terms = []\n\n                    if isinstance(prepared_value, str):\n                        possible_values = prepared_value.split(\" \")\n                    else:\n                        if is_datetime is True:\n                            prepared_value = self._convert_datetime(prepared_value)\n\n                        possible_values = [prepared_value]\n\n                    for possible_value in possible_values:\n                        possible_value_str = self.backend._from_python(possible_value)\n                        if filter_type == \"fuzzy\":\n                            terms.append(\n                                filter_types[filter_type]\n                                % (\n                                    
possible_value_str,\n                                    min(\n                                        FUZZY_WHOOSH_MIN_PREFIX, len(possible_value_str)\n                                    ),\n                                )\n                            )\n                        else:\n                            terms.append(filter_types[filter_type] % possible_value_str)\n\n                    if len(terms) == 1:\n                        query_frag = terms[0]\n                    else:\n                        query_frag = \"(%s)\" % \" AND \".join(terms)\n            elif filter_type == \"in\":\n                in_options = []\n\n                for possible_value in prepared_value:\n                    is_datetime = False\n\n                    if hasattr(possible_value, \"strftime\"):\n                        is_datetime = True\n\n                    pv = self.backend._from_python(possible_value)\n\n                    if is_datetime is True:\n                        pv = self._convert_datetime(pv)\n\n                    if isinstance(pv, str) and not is_datetime:\n                        in_options.append('\"%s\"' % pv)\n                    else:\n                        in_options.append(\"%s\" % pv)\n\n                query_frag = \"(%s)\" % \" OR \".join(in_options)\n            elif filter_type == \"range\":\n                start = self.backend._from_python(prepared_value[0])\n                end = self.backend._from_python(prepared_value[1])\n\n                if hasattr(prepared_value[0], \"strftime\"):\n                    start = self._convert_datetime(start)\n\n                if hasattr(prepared_value[1], \"strftime\"):\n                    end = self._convert_datetime(end)\n\n                query_frag = \"[%s to %s]\" % (start, end)\n            elif filter_type == \"exact\":\n                if value.input_type_name == \"exact\":\n                    query_frag = prepared_value\n                else:\n                    prepared_value = 
Exact(prepared_value).prepare(self)\n                    query_frag = filter_types[filter_type] % prepared_value\n            else:\n                if is_datetime is True:\n                    prepared_value = self._convert_datetime(prepared_value)\n\n                query_frag = filter_types[filter_type] % prepared_value\n\n        if len(query_frag) and not isinstance(value, Raw):\n            if not query_frag.startswith(\"(\") and not query_frag.endswith(\")\"):\n                query_frag = \"(%s)\" % query_frag\n\n        return \"%s%s\" % (index_fieldname, query_frag)\n\n\nclass WhooshEngine(BaseEngine):\n    backend = WhooshSearchBackend\n    query = WhooshSearchQuery\n"
  },
  {
    "path": "haystack/constants.py",
    "content": "from django.conf import settings\n\nDEFAULT_ALIAS = \"default\"\n\n# Reserved field names\nID = getattr(settings, \"HAYSTACK_ID_FIELD\", \"id\")\nDJANGO_CT = getattr(settings, \"HAYSTACK_DJANGO_CT_FIELD\", \"django_ct\")\nDJANGO_ID = getattr(settings, \"HAYSTACK_DJANGO_ID_FIELD\", \"django_id\")\nDOCUMENT_FIELD = getattr(settings, \"HAYSTACK_DOCUMENT_FIELD\", \"text\")\n\n# Default operator. Valid options are AND/OR.\nDEFAULT_OPERATOR = getattr(settings, \"HAYSTACK_DEFAULT_OPERATOR\", \"AND\")\n\n# Default values on elasticsearch\nFUZZINESS = getattr(settings, \"HAYSTACK_FUZZINESS\", \"AUTO\")\nFUZZY_MIN_SIM = getattr(settings, \"HAYSTACK_FUZZY_MIN_SIM\", 0.5)\nFUZZY_MAX_EXPANSIONS = getattr(settings, \"HAYSTACK_FUZZY_MAX_EXPANSIONS\", 50)\n\n# Default values on whoosh\nFUZZY_WHOOSH_MIN_PREFIX = getattr(settings, \"HAYSTACK_FUZZY_WHOOSH_MIN_PREFIX\", 3)\nFUZZY_WHOOSH_MAX_EDITS = getattr(settings, \"HAYSTACK_FUZZY_WHOOSH_MAX_EDITS\", 2)\n\n# Valid expression extensions.\nVALID_FILTERS = {\n    \"contains\",\n    \"exact\",\n    \"gt\",\n    \"gte\",\n    \"lt\",\n    \"lte\",\n    \"in\",\n    \"startswith\",\n    \"range\",\n    \"endswith\",\n    \"content\",\n    \"fuzzy\",\n}\n\n\nFILTER_SEPARATOR = \"__\"\n\n# The maximum number of items to display in a SearchQuerySet.__repr__\nREPR_OUTPUT_SIZE = 20\n\n# Number of SearchResults to load at a time.\nITERATOR_LOAD_PER_QUERY = getattr(settings, \"HAYSTACK_ITERATOR_LOAD_PER_QUERY\", 10)\n\n\n# A marker class in the hierarchy to indicate that it handles search data.\nclass Indexable(object):\n    haystack_use_for_indexing = True\n\n\n# For the geo bits, since that's what Solr & Elasticsearch seem to silently\n# assume...\nWGS_84_SRID = 4326\n"
  },
  {
    "path": "haystack/exceptions.py",
    "content": "class HaystackError(Exception):\n    \"\"\"A generic exception for all others to extend.\"\"\"\n\n    pass\n\n\nclass SearchBackendError(HaystackError):\n    \"\"\"Raised when a backend can not be found.\"\"\"\n\n    pass\n\n\nclass SearchFieldError(HaystackError):\n    \"\"\"Raised when a field encounters an error.\"\"\"\n\n    pass\n\n\nclass MissingDependency(HaystackError):\n    \"\"\"Raised when a library a backend depends on can not be found.\"\"\"\n\n    pass\n\n\nclass NotHandled(HaystackError):\n    \"\"\"Raised when a model is not handled by the router setup.\"\"\"\n\n    pass\n\n\nclass MoreLikeThisError(HaystackError):\n    \"\"\"Raised when a model instance has not been provided for More Like This.\"\"\"\n\n    pass\n\n\nclass FacetingError(HaystackError):\n    \"\"\"Raised when incorrect arguments have been provided for faceting.\"\"\"\n\n    pass\n\n\nclass SpatialError(HaystackError):\n    \"\"\"Raised when incorrect arguments have been provided for spatial.\"\"\"\n\n    pass\n\n\nclass StatsError(HaystackError):\n    \"Raised when incorrect arguments have been provided for stats\"\n    pass\n\n\nclass SkipDocument(HaystackError):\n    \"\"\"Raised when a document should be skipped while updating\"\"\"\n\n    pass\n"
  },
  {
    "path": "haystack/fields.py",
    "content": "import re\nfrom inspect import ismethod\n\nfrom django.template import loader\nfrom django.utils import datetime_safe\nfrom whoosh import analysis\n\nfrom haystack.exceptions import SearchFieldError\nfrom haystack.utils import get_model_ct_tuple\n\n\nclass NOT_PROVIDED:\n    pass\n\n\n# Note that dates in the full ISO 8601 format will be accepted as long as the hour/minute/second components\n# are zeroed for compatibility with search backends which lack a date time distinct from datetime:\nDATE_REGEX = re.compile(\n    r\"^(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})(?:|T00:00:00Z?)$\"\n)\nDATETIME_REGEX = re.compile(\n    r\"^(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})(T|\\s+)(?P<hour>\\d{2}):(?P<minute>\\d{2}):(?P<second>\\d{2}).*?$\"\n)\n\n\n# All the SearchFields variants.\n\n\nclass SearchField(object):\n    \"\"\"The base implementation of a search field.\"\"\"\n\n    field_type = None\n\n    def __init__(\n        self,\n        model_attr=None,\n        use_template=False,\n        template_name=None,\n        document=False,\n        indexed=True,\n        stored=True,\n        faceted=False,\n        default=NOT_PROVIDED,\n        null=False,\n        index_fieldname=None,\n        facet_class=None,\n        boost=1.0,\n        weight=None,\n        analyzer=NOT_PROVIDED,\n    ):\n        # Track what the index thinks this field is called.\n        self.instance_name = None\n        self.model_attr = model_attr\n        self.use_template = use_template\n        self.template_name = template_name\n        self.document = document\n        self.indexed = indexed\n        self.stored = stored\n        self.faceted = faceted\n        self._default = default\n        self.null = null\n        self.index_fieldname = index_fieldname\n        self.boost = weight or boost\n        self._analyzer = analyzer\n        self.is_multivalued = False\n\n        # We supply the facet_class for making it easy to create a faceted\n        # field 
based off of this field.\n        self.facet_class = facet_class\n\n        if self.facet_class is None:\n            self.facet_class = FacetCharField\n\n        self.set_instance_name(None)\n\n    @property\n    def analyzer(self):\n        if self._analyzer is NOT_PROVIDED:\n            return None\n        return self._analyzer\n\n    def set_instance_name(self, instance_name):\n        self.instance_name = instance_name\n\n        if self.index_fieldname is None:\n            self.index_fieldname = self.instance_name\n\n    def has_default(self):\n        \"\"\"Returns a boolean of whether this field has a default value.\"\"\"\n        return self._default is not NOT_PROVIDED\n\n    @property\n    def default(self):\n        \"\"\"Returns the default value for the field.\"\"\"\n        if callable(self._default):\n            return self._default()\n\n        return self._default\n\n    def prepare(self, obj):\n        \"\"\"\n        Takes data from the provided object and prepares it for storage in the\n        index.\n        \"\"\"\n        # Give priority to a template.\n        if self.use_template:\n            return self.prepare_template(obj)\n        elif self.model_attr is not None:\n            attrs = self.split_model_attr_lookups()\n            current_objects = [obj]\n\n            values = self.resolve_attributes_lookup(current_objects, attrs)\n\n            if len(values) == 1:\n                return values[0]\n            elif len(values) > 1:\n                return values\n\n        if self.has_default():\n            return self.default\n        else:\n            return None\n\n    def resolve_attributes_lookup(self, current_objects, attributes):\n        \"\"\"\n        Recursive method that looks, for one or more objects, for an attribute that can be multiple\n        objects (relations) deep.\n        \"\"\"\n        values = []\n\n        for current_object in current_objects:\n            if not hasattr(current_object, 
attributes[0]):\n                raise SearchFieldError(\n                    \"The model '%r' does not have a model_attr '%s'.\"\n                    % (repr(current_object), attributes[0])\n                )\n\n            if len(attributes) > 1:\n                current_objects_in_attr = self.get_iterable_objects(\n                    getattr(current_object, attributes[0])\n                )\n                values.extend(\n                    self.resolve_attributes_lookup(\n                        current_objects_in_attr, attributes[1:]\n                    )\n                )\n                continue\n\n            current_object = getattr(current_object, attributes[0])\n\n            if current_object is None:\n                if self.has_default():\n                    current_object = self._default\n                elif self.null:\n                    current_object = None\n                else:\n                    raise SearchFieldError(\n                        \"The model '%s' combined with model_attr '%s' returned None, but doesn't allow \"\n                        \"a default or null value.\"\n                        % (repr(current_object), self.model_attr)\n                    )\n\n            if callable(current_object):\n                values.append(current_object())\n            else:\n                values.append(current_object)\n\n        return values\n\n    def split_model_attr_lookups(self):\n        \"\"\"Returns list of nested attributes for looking through the relation.\"\"\"\n        return self.model_attr.split(\"__\")\n\n    @classmethod\n    def get_iterable_objects(cls, current_objects):\n        \"\"\"\n        Returns iterable of objects that contain data. 
For example, resolves Django ManyToMany relationship\n        so the attributes of the related models can then be accessed.\n        \"\"\"\n        if current_objects is None:\n            return []\n\n        if hasattr(current_objects, \"all\"):\n            # i.e, Django ManyToMany relationships\n            if ismethod(current_objects.all):\n                return current_objects.all()\n            return []\n\n        elif not hasattr(current_objects, \"__iter__\"):\n            current_objects = [current_objects]\n\n        return current_objects\n\n    def prepare_template(self, obj):\n        \"\"\"\n        Flattens an object for indexing.\n\n        This loads a template\n        (``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and\n        returns the result of rendering that template. ``object`` will be in\n        its context.\n        \"\"\"\n        if self.instance_name is None and self.template_name is None:\n            raise SearchFieldError(\n                \"This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.\"\n            )\n\n        if self.template_name is not None:\n            template_names = self.template_name\n\n            if not isinstance(template_names, (list, tuple)):\n                template_names = [template_names]\n        else:\n            app_label, model_name = get_model_ct_tuple(obj)\n            template_names = [\n                \"search/indexes/%s/%s_%s.txt\"\n                % (app_label, model_name, self.instance_name)\n            ]\n\n        t = loader.select_template(template_names)\n        return t.render({\"object\": obj})\n\n    def convert(self, value):\n        \"\"\"\n        Handles conversion between the data found and the type of the field.\n\n        Extending classes should override this method and provide correct\n        data coercion.\n        \"\"\"\n        return value\n\n\nclass 
CharField(SearchField):\n    field_type = \"string\"\n\n    def __init__(self, analyzer=NOT_PROVIDED, **kwargs):\n        if kwargs.get(\"facet_class\") is None:\n            kwargs[\"facet_class\"] = FacetCharField\n\n        # use StemmingAnalyzer by default\n        kwargs[\"analyzer\"] = (\n            analysis.StemmingAnalyzer() if analyzer is NOT_PROVIDED else analyzer\n        )\n\n        super().__init__(**kwargs)\n\n    def prepare(self, obj):\n        return self.convert(super().prepare(obj))\n\n    def convert(self, value):\n        if value is None:\n            return None\n\n        return str(value)\n\n\nclass LocationField(SearchField):\n    field_type = \"location\"\n\n    def prepare(self, obj):\n        from haystack.utils.geo import ensure_point\n\n        value = super().prepare(obj)\n\n        if value is None:\n            return None\n\n        pnt = ensure_point(value)\n        pnt_lng, pnt_lat = pnt.coords\n        return \"%s,%s\" % (pnt_lat, pnt_lng)\n\n    def convert(self, value):\n        from django.contrib.gis.geos import Point\n\n        from haystack.utils.geo import ensure_point\n\n        if value is None:\n            return None\n\n        if hasattr(value, \"geom_type\"):\n            value = ensure_point(value)\n            return value\n\n        if isinstance(value, str):\n            lat, lng = value.split(\",\")\n        elif isinstance(value, (list, tuple)):\n            # GeoJSON-alike\n            lat, lng = value[1], value[0]\n        elif isinstance(value, dict):\n            lat = value.get(\"lat\", 0)\n            lng = value.get(\"lon\", 0)\n        else:\n            raise TypeError(\"Unable to extract coordinates from %r\" % value)\n\n        value = Point(float(lng), float(lat))\n        return value\n\n\nclass NgramField(CharField):\n    field_type = \"ngram\"\n\n    def __init__(self, **kwargs):\n        if kwargs.get(\"faceted\") is True:\n            raise SearchFieldError(\"%s can not be faceted.\" % 
self.__class__.__name__)\n\n        super().__init__(**kwargs)\n\n\nclass EdgeNgramField(NgramField):\n    field_type = \"edge_ngram\"\n\n\nclass IntegerField(SearchField):\n    field_type = \"integer\"\n\n    def __init__(self, **kwargs):\n        if kwargs.get(\"facet_class\") is None:\n            kwargs[\"facet_class\"] = FacetIntegerField\n\n        super().__init__(**kwargs)\n\n    def prepare(self, obj):\n        return self.convert(super().prepare(obj))\n\n    def convert(self, value):\n        if value is None:\n            return None\n\n        return int(value)\n\n\nclass FloatField(SearchField):\n    field_type = \"float\"\n\n    def __init__(self, **kwargs):\n        if kwargs.get(\"facet_class\") is None:\n            kwargs[\"facet_class\"] = FacetFloatField\n\n        super().__init__(**kwargs)\n\n    def prepare(self, obj):\n        return self.convert(super().prepare(obj))\n\n    def convert(self, value):\n        if value is None:\n            return None\n\n        return float(value)\n\n\nclass DecimalField(SearchField):\n    field_type = \"string\"\n\n    def __init__(self, **kwargs):\n        if kwargs.get(\"facet_class\") is None:\n            kwargs[\"facet_class\"] = FacetDecimalField\n\n        super().__init__(**kwargs)\n\n    def prepare(self, obj):\n        return self.convert(super().prepare(obj))\n\n    def convert(self, value):\n        if value is None:\n            return None\n\n        return str(value)\n\n\nclass BooleanField(SearchField):\n    field_type = \"boolean\"\n\n    def __init__(self, **kwargs):\n        if kwargs.get(\"facet_class\") is None:\n            kwargs[\"facet_class\"] = FacetBooleanField\n\n        super().__init__(**kwargs)\n\n    def prepare(self, obj):\n        return self.convert(super().prepare(obj))\n\n    def convert(self, value):\n        if value is None:\n            return None\n\n        return bool(value)\n\n\nclass DateField(SearchField):\n    field_type = \"date\"\n\n    def __init__(self, 
**kwargs):\n        if kwargs.get(\"facet_class\") is None:\n            kwargs[\"facet_class\"] = FacetDateField\n\n        super().__init__(**kwargs)\n\n    def prepare(self, obj):\n        return self.convert(super().prepare(obj))\n\n    def convert(self, value):\n        if value is None:\n            return None\n\n        if isinstance(value, str):\n            match = DATE_REGEX.search(value)\n\n            if match:\n                data = match.groupdict()\n                return datetime_safe.date(\n                    int(data[\"year\"]), int(data[\"month\"]), int(data[\"day\"])\n                )\n            else:\n                raise SearchFieldError(\n                    \"Date provided to '%s' field doesn't appear to be a valid date string: '%s'\"\n                    % (self.instance_name, value)\n                )\n\n        return value\n\n\nclass DateTimeField(SearchField):\n    field_type = \"datetime\"\n\n    def __init__(self, **kwargs):\n        if kwargs.get(\"facet_class\") is None:\n            kwargs[\"facet_class\"] = FacetDateTimeField\n\n        super().__init__(**kwargs)\n\n    def prepare(self, obj):\n        return self.convert(super().prepare(obj))\n\n    def convert(self, value):\n        if value is None:\n            return None\n\n        if isinstance(value, str):\n            match = DATETIME_REGEX.search(value)\n\n            if match:\n                data = match.groupdict()\n                return datetime_safe.datetime(\n                    int(data[\"year\"]),\n                    int(data[\"month\"]),\n                    int(data[\"day\"]),\n                    int(data[\"hour\"]),\n                    int(data[\"minute\"]),\n                    int(data[\"second\"]),\n                )\n            else:\n                raise SearchFieldError(\n                    \"Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'\"\n                    % (self.instance_name, value)\n             
   )\n\n        return value\n\n\nclass MultiValueField(SearchField):\n    field_type = \"string\"\n\n    def __init__(self, **kwargs):\n        if kwargs.get(\"facet_class\") is None:\n            kwargs[\"facet_class\"] = FacetMultiValueField\n\n        if kwargs.get(\"use_template\") is True:\n            raise SearchFieldError(\n                \"'%s' fields can not use templates to prepare their data.\"\n                % self.__class__.__name__\n            )\n\n        super().__init__(**kwargs)\n        self.is_multivalued = True\n\n    def prepare(self, obj):\n        return self.convert(super().prepare(obj))\n\n    def convert(self, value):\n        if value is None:\n            return None\n\n        if hasattr(value, \"__iter__\") and not isinstance(value, str):\n            return value\n\n        return [value]\n\n\nclass FacetField(SearchField):\n    \"\"\"\n    ``FacetField`` is slightly different than the other fields because it can\n    work in conjunction with other fields as its data source.\n\n    Accepts an optional ``facet_for`` kwarg, which should be the field name\n    (not ``index_fieldname``) of the field it should pull data from.\n    \"\"\"\n\n    instance_name = None\n\n    def __init__(self, **kwargs):\n        handled_kwargs = self.handle_facet_parameters(kwargs)\n        super().__init__(**handled_kwargs)\n\n    def handle_facet_parameters(self, kwargs):\n        if kwargs.get(\"faceted\", False):\n            raise SearchFieldError(\n                \"FacetField (%s) does not accept the 'faceted' argument.\"\n                % self.instance_name\n            )\n\n        if not kwargs.get(\"null\", True):\n            raise SearchFieldError(\n                \"FacetField (%s) does not accept False for the 'null' argument.\"\n                % self.instance_name\n            )\n\n        if not kwargs.get(\"indexed\", True):\n            raise SearchFieldError(\n                \"FacetField (%s) does not accept False for the 
'indexed' argument.\"\n                % self.instance_name\n            )\n\n        if kwargs.get(\"facet_class\"):\n            raise SearchFieldError(\n                \"FacetField (%s) does not accept the 'facet_class' argument.\"\n                % self.instance_name\n            )\n\n        self.facet_for = None\n        self.facet_class = None\n\n        # Make sure the field is nullable.\n        kwargs[\"null\"] = True\n\n        if \"facet_for\" in kwargs:\n            self.facet_for = kwargs[\"facet_for\"]\n            del kwargs[\"facet_for\"]\n\n        return kwargs\n\n    def get_facet_for_name(self):\n        return self.facet_for or self.instance_name\n\n\nclass FacetCharField(FacetField, CharField):\n    pass\n\n\nclass FacetIntegerField(FacetField, IntegerField):\n    pass\n\n\nclass FacetFloatField(FacetField, FloatField):\n    pass\n\n\nclass FacetDecimalField(FacetField, DecimalField):\n    pass\n\n\nclass FacetBooleanField(FacetField, BooleanField):\n    pass\n\n\nclass FacetDateField(FacetField, DateField):\n    pass\n\n\nclass FacetDateTimeField(FacetField, DateTimeField):\n    pass\n\n\nclass FacetMultiValueField(FacetField, MultiValueField):\n    pass\n"
  },
  {
    "path": "haystack/forms.py",
    "content": "from django import forms\nfrom django.utils.encoding import smart_text\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy as _\n\nfrom haystack import connections\nfrom haystack.constants import DEFAULT_ALIAS\nfrom haystack.query import EmptySearchQuerySet, SearchQuerySet\nfrom haystack.utils import get_model_ct\nfrom haystack.utils.app_loading import haystack_get_model\n\n\ndef model_choices(using=DEFAULT_ALIAS):\n    choices = [\n        (get_model_ct(m), capfirst(smart_text(m._meta.verbose_name_plural)))\n        for m in connections[using].get_unified_index().get_indexed_models()\n    ]\n    return sorted(choices, key=lambda x: x[1])\n\n\nclass SearchForm(forms.Form):\n    q = forms.CharField(\n        required=False,\n        label=_(\"Search\"),\n        widget=forms.TextInput(attrs={\"type\": \"search\"}),\n    )\n\n    def __init__(self, *args, **kwargs):\n        self.searchqueryset = kwargs.pop(\"searchqueryset\", None)\n        self.load_all = kwargs.pop(\"load_all\", False)\n\n        if self.searchqueryset is None:\n            self.searchqueryset = SearchQuerySet()\n\n        super().__init__(*args, **kwargs)\n\n    def no_query_found(self):\n        \"\"\"\n        Determines the behavior when no query was found.\n\n        By default, no results are returned (``EmptySearchQuerySet``).\n\n        Should you want to show all results, override this method in your\n        own ``SearchForm`` subclass and do ``return self.searchqueryset.all()``.\n        \"\"\"\n        return EmptySearchQuerySet()\n\n    def search(self):\n        if not self.is_valid():\n            return self.no_query_found()\n\n        if not self.cleaned_data.get(\"q\"):\n            return self.no_query_found()\n\n        sqs = self.searchqueryset.auto_query(self.cleaned_data[\"q\"])\n\n        if self.load_all:\n            sqs = sqs.load_all()\n\n        return sqs\n\n    def get_suggestion(self):\n        if not 
self.is_valid():\n            return None\n\n        return self.searchqueryset.spelling_suggestion(self.cleaned_data[\"q\"])\n\n\nclass HighlightedSearchForm(SearchForm):\n    def search(self):\n        return super().search().highlight()\n\n\nclass FacetedSearchForm(SearchForm):\n    def __init__(self, *args, **kwargs):\n        self.selected_facets = kwargs.pop(\"selected_facets\", [])\n        super().__init__(*args, **kwargs)\n\n    def search(self):\n        sqs = super().search()\n\n        # We need to process each facet to ensure that the field name and the\n        # value are quoted correctly and separately:\n        for facet in self.selected_facets:\n            if \":\" not in facet:\n                continue\n\n            field, value = facet.split(\":\", 1)\n\n            if value:\n                sqs = sqs.narrow('%s:\"%s\"' % (field, sqs.query.clean(value)))\n\n        return sqs\n\n\nclass ModelSearchForm(SearchForm):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.fields[\"models\"] = forms.MultipleChoiceField(\n            choices=model_choices(),\n            required=False,\n            label=_(\"Search In\"),\n            widget=forms.CheckboxSelectMultiple,\n        )\n\n    def get_models(self):\n        \"\"\"Return a list of the selected models.\"\"\"\n        search_models = []\n\n        if self.is_valid():\n            for model in self.cleaned_data[\"models\"]:\n                search_models.append(haystack_get_model(*model.split(\".\")))\n\n        return search_models\n\n    def search(self):\n        sqs = super().search()\n        return sqs.models(*self.get_models())\n\n\nclass HighlightedModelSearchForm(ModelSearchForm):\n    def search(self):\n        return super().search().highlight()\n\n\nclass FacetedModelSearchForm(ModelSearchForm):\n    selected_facets = forms.CharField(required=False, widget=forms.HiddenInput)\n\n    def search(self):\n        sqs = 
super().search()\n\n        if hasattr(self, \"cleaned_data\") and self.cleaned_data[\"selected_facets\"]:\n            sqs = sqs.narrow(self.cleaned_data[\"selected_facets\"])\n\n        return sqs.models(*self.get_models())\n"
  },
  {
    "path": "haystack/generic_views.py",
    "content": "from django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.views.generic import FormView\nfrom django.views.generic.edit import FormMixin\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom .forms import FacetedSearchForm, ModelSearchForm\nfrom .query import SearchQuerySet\n\nRESULTS_PER_PAGE = getattr(settings, \"HAYSTACK_SEARCH_RESULTS_PER_PAGE\", 20)\n\n\nclass SearchMixin(MultipleObjectMixin, FormMixin):\n    \"\"\"\n    A mixin that allows adding in Haystacks search functionality into\n    another view class.\n\n    This mixin exhibits similar end functionality as the base Haystack search\n    view, but with some important distinctions oriented around greater\n    compatibility with Django's built-in class based views and mixins.\n\n    Normal flow:\n\n        self.request = request\n\n        self.form = self.build_form()\n        self.query = self.get_query()\n        self.results = self.get_results()\n\n        return self.create_response()\n\n    This mixin should:\n\n        1. Make the form\n        2. Get the queryset\n        3. 
Return the paginated queryset\n\n    \"\"\"\n\n    template_name = \"search/search.html\"\n    load_all = True\n    form_class = ModelSearchForm\n    context_object_name = None\n    paginate_by = RESULTS_PER_PAGE\n    paginate_orphans = 0\n    paginator_class = Paginator\n    page_kwarg = \"page\"\n    form_name = \"form\"\n    search_field = \"q\"\n    object_list = None\n\n    def get_queryset(self):\n        if self.queryset is None:\n            self.queryset = SearchQuerySet()\n        return self.queryset\n\n    def get_form_kwargs(self):\n        \"\"\"\n        Returns the keyword arguments for instantiating the form.\n        \"\"\"\n        kwargs = {\"initial\": self.get_initial()}\n        if self.request.method == \"GET\":\n            kwargs.update({\"data\": self.request.GET})\n        kwargs.update(\n            {\"searchqueryset\": self.get_queryset(), \"load_all\": self.load_all}\n        )\n        return kwargs\n\n    def form_invalid(self, form):\n        context = self.get_context_data(\n            **{self.form_name: form, \"object_list\": self.get_queryset()}\n        )\n        return self.render_to_response(context)\n\n    def form_valid(self, form):\n        self.queryset = form.search()\n        context = self.get_context_data(\n            **{\n                self.form_name: form,\n                \"query\": form.cleaned_data.get(self.search_field),\n                \"object_list\": self.queryset,\n            }\n        )\n        return self.render_to_response(context)\n\n\nclass FacetedSearchMixin(SearchMixin):\n    \"\"\"\n    A mixin that allows adding in a Haystack search functionality with search\n    faceting.\n    \"\"\"\n\n    form_class = FacetedSearchForm\n    facet_fields = None\n\n    def get_form_kwargs(self):\n        kwargs = super().get_form_kwargs()\n        kwargs.update({\"selected_facets\": self.request.GET.getlist(\"selected_facets\")})\n        return kwargs\n\n    def get_context_data(self, **kwargs):\n        
context = super().get_context_data(**kwargs)\n        context.update({\"facets\": self.queryset.facet_counts()})\n        return context\n\n    def get_queryset(self):\n        qs = super().get_queryset()\n        for field in self.facet_fields:\n            qs = qs.facet(field)\n        return qs\n\n\nclass SearchView(SearchMixin, FormView):\n    \"\"\"A view class for searching a Haystack managed search index\"\"\"\n\n    def get(self, request, *args, **kwargs):\n        \"\"\"\n        Handles GET requests and instantiates a blank version of the form.\n        \"\"\"\n        form_class = self.get_form_class()\n        form = self.get_form(form_class)\n\n        if form.is_valid():\n            return self.form_valid(form)\n        else:\n            return self.form_invalid(form)\n\n\nclass FacetedSearchView(FacetedSearchMixin, SearchView):\n    \"\"\"\n    A view class for searching a Haystack managed search index with\n    facets\n    \"\"\"\n\n    pass\n"
  },
  {
    "path": "haystack/indexes.py",
    "content": "import copy\nimport threading\nimport warnings\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.encoding import force_str\n\nfrom haystack import connection_router, connections\nfrom haystack.constants import Indexable  # NOQA — exposed as a public export\nfrom haystack.constants import DEFAULT_ALIAS, DJANGO_CT, DJANGO_ID, ID\nfrom haystack.fields import (  # NOQA — exposed as a public export\n    BooleanField,\n    CharField,\n    DateField,\n    DateTimeField,\n    DecimalField,\n    EdgeNgramField,\n    FacetCharField,\n    FacetDateField,\n    FacetDateTimeField,\n    FacetIntegerField,\n    FloatField,\n    IntegerField,\n    LocationField,\n    MultiValueField,\n    NgramField,\n    SearchField,\n    SearchFieldError,\n)\nfrom haystack.manager import SearchIndexManager\nfrom haystack.utils import get_facet_field_name, get_identifier, get_model_ct\n\n\nclass DeclarativeMetaclass(type):\n    def __new__(cls, name, bases, attrs):\n        attrs[\"fields\"] = {}\n\n        # Inherit any fields from parent(s).\n        try:\n            parents = [b for b in bases if issubclass(b, SearchIndex)]\n            # Simulate the MRO.\n            parents.reverse()\n\n            for p in parents:\n                fields = getattr(p, \"fields\", None)\n\n                if fields:\n                    attrs[\"fields\"].update(fields)\n        except NameError:\n            pass\n\n        # Build a dictionary of faceted fields for cross-referencing.\n        facet_fields = {}\n\n        for field_name, obj in attrs.items():\n            # Only need to check the FacetFields.\n            if hasattr(obj, \"facet_for\"):\n                if obj.facet_for not in facet_fields:\n                    facet_fields[obj.facet_for] = []\n\n                facet_fields[obj.facet_for].append(field_name)\n\n        built_fields = {}\n\n        for field_name, obj in attrs.items():\n            if isinstance(obj, SearchField):\n                
field = attrs[field_name]\n                field.set_instance_name(field_name)\n                built_fields[field_name] = field\n\n                # Only check non-faceted fields for the following info.\n                if not hasattr(field, \"facet_for\"):\n                    if field.faceted:\n                        # If no other field is claiming this field as\n                        # ``facet_for``, create a shadow ``FacetField``.\n                        if field_name not in facet_fields:\n                            shadow_facet_name = get_facet_field_name(field_name)\n                            shadow_facet_field = field.facet_class(facet_for=field_name)\n                            shadow_facet_field.set_instance_name(shadow_facet_name)\n                            built_fields[shadow_facet_name] = shadow_facet_field\n\n        attrs[\"fields\"].update(built_fields)\n\n        # Assigning default 'objects' query manager if it does not already exist\n        if \"objects\" not in attrs:\n            try:\n                attrs[\"objects\"] = SearchIndexManager(attrs[\"Meta\"].index_label)\n            except (KeyError, AttributeError):\n                attrs[\"objects\"] = SearchIndexManager(DEFAULT_ALIAS)\n\n        return super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)\n\n\nclass SearchIndex(threading.local, metaclass=DeclarativeMetaclass):\n    \"\"\"\n    Base class for building indexes.\n\n    An example might look like this::\n\n        import datetime\n        from haystack import indexes\n        from myapp.models import Note\n\n        class NoteIndex(indexes.SearchIndex, indexes.Indexable):\n            text = indexes.CharField(document=True, use_template=True)\n            author = indexes.CharField(model_attr='user')\n            pub_date = indexes.DateTimeField(model_attr='pub_date')\n\n            def get_model(self):\n                return Note\n\n            def index_queryset(self, using=None):\n                return 
self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())\n\n    \"\"\"\n\n    def __init__(self):\n        self.prepared_data = None\n        content_fields = []\n\n        self.field_map = {}\n        for field_name, field in self.fields.items():\n            # form field map\n            self.field_map[field.index_fieldname] = field_name\n            if field.document is True:\n                content_fields.append(field_name)\n\n        if not len(content_fields) == 1:\n            raise SearchFieldError(\n                \"The index '%s' must have one (and only one) SearchField with document=True.\"\n                % self.__class__.__name__\n            )\n\n    def get_model(self):\n        \"\"\"\n        Should return the ``Model`` class (not an instance) that the rest of the\n        ``SearchIndex`` should use.\n\n        This method is required & you must override it to return the correct class.\n        \"\"\"\n        raise NotImplementedError(\n            \"You must provide a 'get_model' method for the '%r' index.\" % self\n        )\n\n    def index_queryset(self, using=None):\n        \"\"\"\n        Get the default QuerySet to index when doing a full update.\n\n        Subclasses can override this method to avoid indexing certain objects.\n        \"\"\"\n        return self.get_model()._default_manager.all()\n\n    def read_queryset(self, using=None):\n        \"\"\"\n        Get the default QuerySet for read actions.\n\n        Subclasses can override this method to work with other managers.\n        Useful when working with default managers that filter some objects.\n        \"\"\"\n        return self.index_queryset(using=using)\n\n    def build_queryset(self, using=None, start_date=None, end_date=None):\n        \"\"\"\n        Get the default QuerySet to index when doing an index update.\n\n        Subclasses can override this method to take into account related\n        model modification times.\n\n        The default is to use 
``SearchIndex.index_queryset`` and filter\n        based on ``SearchIndex.get_updated_field``\n        \"\"\"\n        extra_lookup_kwargs = {}\n        model = self.get_model()\n        updated_field = self.get_updated_field()\n\n        update_field_msg = (\n            \"No updated date field found for '%s' \" \"- not restricting by age.\"\n        ) % model.__name__\n\n        if start_date:\n            if updated_field:\n                extra_lookup_kwargs[\"%s__gte\" % updated_field] = start_date\n            else:\n                warnings.warn(update_field_msg)\n\n        if end_date:\n            if updated_field:\n                extra_lookup_kwargs[\"%s__lte\" % updated_field] = end_date\n            else:\n                warnings.warn(update_field_msg)\n\n        index_qs = None\n\n        if hasattr(self, \"get_queryset\"):\n            warnings.warn(\n                \"'SearchIndex.get_queryset' was deprecated in Haystack v2.\"\n                \" Please rename the method 'index_queryset'.\"\n            )\n            index_qs = self.get_queryset()\n        else:\n            index_qs = self.index_queryset(using=using)\n\n        if not hasattr(index_qs, \"filter\"):\n            raise ImproperlyConfigured(\n                \"The '%r' class must return a 'QuerySet' in the 'index_queryset' method.\"\n                % self\n            )\n\n        # `.select_related()` seems like a good idea here but can fail on\n        # nullable `ForeignKey` as well as what seems like other cases.\n        return index_qs.filter(**extra_lookup_kwargs).order_by(model._meta.pk.name)\n\n    def prepare(self, obj):\n        \"\"\"\n        Fetches and adds/alters data before indexing.\n        \"\"\"\n        self.prepared_data = {\n            ID: get_identifier(obj),\n            DJANGO_CT: get_model_ct(self.get_model()),\n            DJANGO_ID: force_str(obj.pk),\n        }\n\n        for field_name, field in self.fields.items():\n            # Use the possibly 
overridden name, which will default to the\n            # variable name of the field.\n            self.prepared_data[field.index_fieldname] = field.prepare(obj)\n\n            if hasattr(self, \"prepare_%s\" % field_name):\n                value = getattr(self, \"prepare_%s\" % field_name)(obj)\n                self.prepared_data[field.index_fieldname] = value\n\n        return self.prepared_data\n\n    def full_prepare(self, obj):\n        self.prepared_data = self.prepare(obj)\n\n        for field_name, field in self.fields.items():\n            # Duplicate data for faceted fields.\n            if getattr(field, \"facet_for\", None):\n                source_field_name = self.fields[field.facet_for].index_fieldname\n\n                # If there's data there, leave it alone. Otherwise, populate it\n                # with whatever the related field has.\n                if (\n                    self.prepared_data[field_name] is None\n                    and source_field_name in self.prepared_data\n                ):\n                    self.prepared_data[field.index_fieldname] = self.prepared_data[\n                        source_field_name\n                    ]\n\n            # Remove any fields that lack a value and are ``null=True``.\n            if field.null is True:\n                if self.prepared_data[field.index_fieldname] is None:\n                    del self.prepared_data[field.index_fieldname]\n\n        return self.prepared_data\n\n    def get_content_field(self):\n        \"\"\"Returns the field that supplies the primary document to be indexed.\"\"\"\n        for _, field in self.fields.items():\n            if field.document is True:\n                return field.index_fieldname\n\n    def get_field_weights(self):\n        \"\"\"Returns a dict of fields with weight values\"\"\"\n        weights = {}\n        for field_name, field in self.fields.items():\n            if field.boost:\n                weights[field_name] = field.boost\n        
return weights\n\n    def _get_backend(self, using):\n        warnings.warn(\n            \"SearchIndex._get_backend is deprecated; use SearchIndex.get_backend instead\",\n            DeprecationWarning,\n        )\n        return self.get_backend(using)\n\n    def get_backend(self, using=None):\n        if using is None:\n            try:\n                using = connection_router.for_write(index=self)[0]\n            except IndexError:\n                # There's no backend to handle it. Bomb out.\n                return None\n\n        return connections[using].get_backend()\n\n    def update(self, using=None):\n        \"\"\"\n        Updates the entire index.\n\n        If ``using`` is provided, it specifies which connection should be\n        used. Default relies on the routers to decide which backend should\n        be used.\n        \"\"\"\n\n        backend = self.get_backend(using)\n\n        if backend is not None:\n            backend.update(self, self.index_queryset(using=using))\n\n    def update_object(self, instance, using=None, **kwargs):\n        \"\"\"\n        Update the index for a single object. Attached to the class's\n        post-save hook.\n\n        If ``using`` is provided, it specifies which connection should be\n        used. Default relies on the routers to decide which backend should\n        be used.\n        \"\"\"\n        # Check to make sure we want to index this first.\n        if self.should_update(instance, **kwargs):\n            backend = self.get_backend(using)\n\n            if backend is not None:\n                backend.update(self, [instance])\n\n    def remove_object(self, instance, using=None, **kwargs):\n        \"\"\"\n        Remove an object from the index. Attached to the class's\n        post-delete hook.\n\n        If ``using`` is provided, it specifies which connection should be\n        used. 
Default relies on the routers to decide which backend should\n        be used.\n        \"\"\"\n        backend = self.get_backend(using)\n\n        if backend is not None:\n            backend.remove(instance, **kwargs)\n\n    def clear(self, using=None):\n        \"\"\"\n        Clears the entire index.\n\n        If ``using`` is provided, it specifies which connection should be\n        used. Default relies on the routers to decide which backend should\n        be used.\n        \"\"\"\n        backend = self.get_backend(using)\n\n        if backend is not None:\n            backend.clear(models=[self.get_model()])\n\n    def reindex(self, using=None):\n        \"\"\"\n        Completely clear the index for this model and rebuild it.\n\n        If ``using`` is provided, it specifies which connection should be\n        used. Default relies on the routers to decide which backend should\n        be used.\n        \"\"\"\n        self.clear(using=using)\n        self.update(using=using)\n\n    def get_updated_field(self):\n        \"\"\"\n        Get the field name that represents the updated date for the model.\n\n        If specified, this is used by the reindex command to filter out results\n        from the QuerySet, enabling you to reindex only recent records. This\n        method should either return None (reindex everything always) or a\n        string of the Model's DateField/DateTimeField name.\n        \"\"\"\n        return None\n\n    def should_update(self, instance, **kwargs):\n        \"\"\"\n        Determine if an object should be updated in the index.\n\n        It's useful to override this when an object may save frequently and\n        cause excessive reindexing. 
You should check conditions on the instance\n        and return False if it is not to be indexed.\n\n        By default, returns True (always reindex).\n        \"\"\"\n        return True\n\n    def load_all_queryset(self):\n        \"\"\"\n        Provides the ability to override how objects get loaded in conjunction\n        with ``SearchQuerySet.load_all``.\n\n        This is useful for post-processing the results from the query, enabling\n        things like adding ``select_related`` or filtering certain data.\n\n        By default, returns ``all()`` on the model's default manager.\n        \"\"\"\n        return self.get_model()._default_manager.all()\n\n\nclass BasicSearchIndex(SearchIndex):\n    text = CharField(document=True, use_template=True)\n\n\n# End SearchIndexes\n# Begin ModelSearchIndexes\n\n\ndef index_field_from_django_field(f, default=CharField):\n    \"\"\"\n    Returns the Haystack field type that would likely be associated with each\n    Django type.\n    \"\"\"\n    result = default\n\n    if f.get_internal_type() in (\"DateField\", \"DateTimeField\"):\n        result = DateTimeField\n    elif f.get_internal_type() in (\"BooleanField\", \"NullBooleanField\"):\n        result = BooleanField\n    elif f.get_internal_type() in (\"CommaSeparatedIntegerField\",):\n        result = MultiValueField\n    elif f.get_internal_type() in (\"DecimalField\", \"FloatField\"):\n        result = FloatField\n    elif f.get_internal_type() in (\n        \"IntegerField\",\n        \"PositiveIntegerField\",\n        \"PositiveSmallIntegerField\",\n        \"SmallIntegerField\",\n    ):\n        result = IntegerField\n\n    return result\n\n\nclass ModelSearchIndex(SearchIndex):\n    \"\"\"\n    Introspects the model assigned to it and generates a `SearchIndex` based on\n    the fields of that model.\n\n    In addition, it adds a `text` field that is the `document=True` field and\n    has `use_template=True` option set, just like the `BasicSearchIndex`.\n\n    
Usage of this class might result in inferior `SearchIndex` objects, which\n    can directly affect your search results. Use this to establish basic\n    functionality and move to custom `SearchIndex` objects for better control.\n\n    At this time, it does not handle related fields.\n    \"\"\"\n\n    text = CharField(document=True, use_template=True)\n    # list of reserved field names\n    fields_to_skip = (ID, DJANGO_CT, DJANGO_ID, \"content\", \"text\")\n\n    def __init__(self, extra_field_kwargs=None):\n        super().__init__()\n\n        self.model = None\n\n        self.prepared_data = None\n        content_fields = []\n        self.extra_field_kwargs = extra_field_kwargs or {}\n\n        # Introspect the model, adding/removing fields as needed.\n        # Adds/Excludes should happen only if the fields are not already\n        # defined in `self.fields`.\n        self._meta = getattr(self, \"Meta\", None)\n\n        if self._meta:\n            self.model = getattr(self._meta, \"model\", None)\n            fields = getattr(self._meta, \"fields\", [])\n            excludes = getattr(self._meta, \"excludes\", [])\n\n            # Add in the new fields.\n            self.fields.update(self.get_fields(fields, excludes))\n\n        for field_name, field in self.fields.items():\n            if field.document is True:\n                content_fields.append(field_name)\n\n        if not len(content_fields) == 1:\n            raise SearchFieldError(\n                \"The index '%s' must have one (and only one) SearchField with document=True.\"\n                % self.__class__.__name__\n            )\n\n    def should_skip_field(self, field):\n        \"\"\"\n        Given a Django model field, return if it should be included in the\n        contributed SearchFields.\n        \"\"\"\n        # Skip fields in skip list\n        if field.name in self.fields_to_skip:\n            return True\n\n        # Ignore certain fields (AutoField, related fields).\n        if 
field.primary_key or field.is_relation:\n            return True\n\n        return False\n\n    def get_model(self):\n        return self.model\n\n    def get_index_fieldname(self, f):\n        \"\"\"\n        Given a Django field, return the appropriate index fieldname.\n        \"\"\"\n        return f.name\n\n    def get_fields(self, fields=None, excludes=None):\n        \"\"\"\n        Given any explicit fields to include and fields to exclude, add\n        additional fields based on the associated model.\n        \"\"\"\n        final_fields = {}\n        fields = fields or []\n        excludes = excludes or []\n\n        for f in self.model._meta.fields:\n            # If the field name is already present, skip\n            if f.name in self.fields:\n                continue\n\n            # If field is not present in explicit field listing, skip\n            if fields and f.name not in fields:\n                continue\n\n            # If field is in exclude list, skip\n            if excludes and f.name in excludes:\n                continue\n\n            if self.should_skip_field(f):\n                continue\n\n            index_field_class = index_field_from_django_field(f)\n\n            kwargs = copy.copy(self.extra_field_kwargs)\n            kwargs.update({\"model_attr\": f.name})\n\n            if f.null is True:\n                kwargs[\"null\"] = True\n\n            if f.has_default():\n                kwargs[\"default\"] = f.default\n\n            final_fields[f.name] = index_field_class(**kwargs)\n            final_fields[f.name].set_instance_name(self.get_index_fieldname(f))\n\n        return final_fields\n"
  },
  {
    "path": "haystack/inputs.py",
    "content": "import re\nimport warnings\n\nfrom django.utils.encoding import force_str\n\n\nclass BaseInput(object):\n    \"\"\"\n    The base input type. Doesn't do much. You want ``Raw`` instead.\n    \"\"\"\n\n    input_type_name = \"base\"\n    post_process = True\n\n    def __init__(self, query_string, **kwargs):\n        self.query_string = query_string\n        self.kwargs = kwargs\n\n    def __repr__(self):\n        return \"<%s '%s'>\" % (self.__class__.__name__, self)\n\n    def __str__(self):\n        return force_str(self.query_string)\n\n    def prepare(self, query_obj):\n        return self.query_string\n\n\nclass Raw(BaseInput):\n    \"\"\"\n    An input type for passing a query directly to the backend.\n\n    Prone to not being very portable.\n    \"\"\"\n\n    input_type_name = \"raw\"\n    post_process = False\n\n\nclass PythonData(BaseInput):\n    \"\"\"\n    Represents a bare Python non-string type.\n\n    Largely only for internal use.\n    \"\"\"\n\n    input_type_name = \"python_data\"\n\n\nclass Clean(BaseInput):\n    \"\"\"\n    An input type for sanitizing user/untrusted input.\n    \"\"\"\n\n    input_type_name = \"clean\"\n\n    def prepare(self, query_obj):\n        query_string = super().prepare(query_obj)\n        return query_obj.clean(query_string)\n\n\nclass Exact(BaseInput):\n    \"\"\"\n    An input type for making exact matches.\n    \"\"\"\n\n    input_type_name = \"exact\"\n\n    def prepare(self, query_obj):\n        query_string = super().prepare(query_obj)\n\n        if self.kwargs.get(\"clean\", False):\n            # We need to clean each part of the exact match.\n            exact_bits = [\n                Clean(bit).prepare(query_obj) for bit in query_string.split(\" \") if bit\n            ]\n            query_string = \" \".join(exact_bits)\n\n        return query_obj.build_exact_query(query_string)\n\n\nclass Not(Clean):\n    \"\"\"\n    An input type for negating a query.\n    \"\"\"\n\n    input_type_name = 
\"not\"\n\n    def prepare(self, query_obj):\n        query_string = super().prepare(query_obj)\n        return query_obj.build_not_query(query_string)\n\n\nclass AutoQuery(BaseInput):\n    \"\"\"\n    A convenience class that handles common user queries.\n\n    In addition to cleaning all tokens, it handles double quote bits as\n    exact matches & terms with '-' in front as NOT queries.\n    \"\"\"\n\n    input_type_name = \"auto_query\"\n    post_process = False\n    exact_match_re = re.compile(r'\"(?P<phrase>.*?)\"')\n\n    def prepare(self, query_obj):\n        query_string = super().prepare(query_obj)\n        exacts = self.exact_match_re.findall(query_string)\n        tokens = []\n        query_bits = []\n\n        for rough_token in self.exact_match_re.split(query_string):\n            if not rough_token:\n                continue\n            elif rough_token not in exacts:\n                # We have something that's not an exact match but may have more\n                # than on word in it.\n                tokens.extend(rough_token.split(\" \"))\n            else:\n                tokens.append(rough_token)\n\n        for token in tokens:\n            if not token:\n                continue\n            if token in exacts:\n                query_bits.append(Exact(token, clean=True).prepare(query_obj))\n            elif token.startswith(\"-\") and len(token) > 1:\n                # This might break Xapian. 
Check on this.\n                query_bits.append(Not(token[1:]).prepare(query_obj))\n            else:\n                query_bits.append(Clean(token).prepare(query_obj))\n\n        return \" \".join(query_bits)\n\n\nclass AltParser(BaseInput):\n    \"\"\"\n    If the engine supports it, this input type allows for submitting a query\n    that uses a different parser.\n    \"\"\"\n\n    input_type_name = \"alt_parser\"\n    post_process = False\n    use_parens = False\n\n    def __init__(self, parser_name, query_string=\"\", **kwargs):\n        self.parser_name = parser_name\n        self.query_string = query_string\n        self.kwargs = kwargs\n\n    def __repr__(self):\n        return \"<%s '%s' '%s' '%s'>\" % (\n            self.__class__.__name__,\n            self.parser_name,\n            self.query_string,\n            self.kwargs,\n        )\n\n    def prepare(self, query_obj):\n        if not hasattr(query_obj, \"build_alt_parser_query\"):\n            warnings.warn(\n                \"Use of 'AltParser' input type is being ignored, as the '%s' backend doesn't support them.\"\n                % query_obj\n            )\n            return \"\"\n\n        return query_obj.build_alt_parser_query(\n            self.parser_name, self.query_string, **self.kwargs\n        )\n"
  },
  {
    "path": "haystack/management/__init__.py",
    "content": ""
  },
  {
    "path": "haystack/management/commands/__init__.py",
    "content": ""
  },
  {
    "path": "haystack/management/commands/build_solr_schema.py",
    "content": "import os\n\nimport requests\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.template import loader\n\nfrom haystack import connections, constants\nfrom haystack.backends.solr_backend import SolrSearchBackend\n\n\nclass Command(BaseCommand):\n    help = (  # noqa A003\n        \"Generates a Solr schema that reflects the indexes using templates \"\n        \" under a django template dir 'search_configuration/*.xml'.  If none are \"\n        \" found, then provides defaults suitable to Solr 6.4\"\n    )\n    schema_template_loc = \"search_configuration/schema.xml\"\n    solrcfg_template_loc = \"search_configuration/solrconfig.xml\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"-f\",\n            \"--filename\",\n            help=\"Generate schema.xml directly into a file instead of stdout.\"\n            \" Does not render solrconfig.xml\",\n        )\n        parser.add_argument(\n            \"-u\",\n            \"--using\",\n            default=constants.DEFAULT_ALIAS,\n            help=\"Select a specific Solr connection to work with.\",\n        )\n        parser.add_argument(\n            \"-c\",\n            \"--configure-directory\",\n            help=\"Attempt to configure a core located in the given directory\"\n            \" by removing the managed-schema.xml(renaming) if it \"\n            \" exists, configuring the core by rendering the schema.xml and \"\n            \" solrconfig.xml templates provided in the django project's \"\n            \" TEMPLATE_DIR/search_configuration directories\",\n        )\n        parser.add_argument(\n            \"-r\",\n            \"--reload-core\",\n            help=\"If provided, attempts to automatically reload the solr core\"\n            ' via the urls in the \"URL\" and \"ADMIN_URL\" settings of the SOLR'\n            \" 
HAYSTACK_CONNECTIONS entry. Both MUST be set.\",\n        )\n\n    def handle(self, **options):\n        \"\"\"Generates a Solr schema that reflects the indexes.\"\"\"\n        using = options.get(\"using\")\n        if not isinstance(connections[using].get_backend(), SolrSearchBackend):\n            raise ImproperlyConfigured(\"'%s' isn't configured as a SolrEngine\" % using)\n\n        schema_xml = self.build_template(\n            using=using, template_filename=Command.schema_template_loc\n        )\n        solrcfg_xml = self.build_template(\n            using=using, template_filename=Command.solrcfg_template_loc\n        )\n\n        filename = options.get(\"filename\")\n        configure_directory = options.get(\"configure_directory\")\n        reload_core = options.get(\"reload_core\")\n\n        if filename:\n            self.stdout.write(\n                \"Trying to write schema file located at {}\".format(filename)\n            )\n            self.write_file(filename, schema_xml)\n\n            if reload_core:\n                connections[using].get_backend().reload()\n\n        if configure_directory:\n            self.stdout.write(\n                \"Trying to configure core located at {}\".format(configure_directory)\n            )\n\n            managed_schema_path = os.path.join(configure_directory, \"managed-schema\")\n\n            if os.path.isfile(managed_schema_path):\n                try:\n                    os.rename(managed_schema_path, \"%s.old\" % managed_schema_path)\n                except OSError as exc:\n                    raise CommandError(\n                        \"Could not rename old managed schema file {}: {}\".format(\n                            managed_schema_path, exc\n                        )\n                    )\n\n            schema_xml_path = os.path.join(configure_directory, \"schema.xml\")\n\n            try:\n                self.write_file(schema_xml_path, schema_xml)\n            except EnvironmentError as 
exc:\n                raise CommandError(\n                    \"Could not configure {}: {}\".format(schema_xml_path, exc)\n                )\n\n            solrconfig_path = os.path.join(configure_directory, \"solrconfig.xml\")\n\n            try:\n                self.write_file(solrconfig_path, solrcfg_xml)\n            except EnvironmentError as exc:\n                raise CommandError(\n                    \"Could not write {}: {}\".format(solrconfig_path, exc)\n                )\n\n        if reload_core:\n            core = settings.HAYSTACK_CONNECTIONS[using][\"URL\"].rsplit(\"/\", 1)[-1]\n\n            if \"ADMIN_URL\" not in settings.HAYSTACK_CONNECTIONS[using]:\n                raise ImproperlyConfigured(\n                    \"'ADMIN_URL' must be specified in the HAYSTACK_CONNECTIONS\"\n                    \" for the %s backend\" % using\n                )\n            if \"URL\" not in settings.HAYSTACK_CONNECTIONS[using]:\n                raise ImproperlyConfigured(\n                    \"'URL' must be specified in the HAYSTACK_CONNECTIONS\"\n                    \" for the %s backend\" % using\n                )\n\n            try:\n                self.stdout.write(\"Trying to reload core named {}\".format(core))\n                resp = requests.get(\n                    settings.HAYSTACK_CONNECTIONS[using][\"ADMIN_URL\"],\n                    params={\"action\": \"RELOAD\", \"core\": core},\n                )\n\n                if not resp.ok:\n                    raise CommandError(\n                        \"Failed to reload core – Solr error: {}\".format(resp)\n                    )\n            except CommandError:\n                raise\n            except Exception as exc:\n                raise CommandError(\"Failed to reload core {}: {}\".format(core, exc))\n\n        if not filename and not configure_directory and not reload_core:\n            self.print_stdout(schema_xml)\n\n    def build_context(self, using):\n        backend = 
connections[using].get_backend()\n\n        if not isinstance(backend, SolrSearchBackend):\n            raise ImproperlyConfigured(\n                \"'%s' isn't configured as a SolrEngine\" % backend.connection_alias\n            )\n\n        content_field_name, fields = backend.build_schema(\n            connections[using].get_unified_index().all_searchfields()\n        )\n        return {\n            \"content_field_name\": content_field_name,\n            \"fields\": fields,\n            \"default_operator\": constants.DEFAULT_OPERATOR,\n            \"ID\": constants.ID,\n            \"DJANGO_CT\": constants.DJANGO_CT,\n            \"DJANGO_ID\": constants.DJANGO_ID,\n        }\n\n    def build_template(self, using, template_filename=schema_template_loc):\n        t = loader.get_template(template_filename)\n        c = self.build_context(using=using)\n        return t.render(c)\n\n    def print_stdout(self, schema_xml):\n        self.stderr.write(\"\\n\")\n        self.stderr.write(\"\\n\")\n        self.stderr.write(\"\\n\")\n        self.stderr.write(\n            \"Save the following output to 'schema.xml' and place it in your Solr configuration directory.\\n\"\n        )\n        self.stderr.write(\n            \"--------------------------------------------------------------------------------------------\\n\"\n        )\n        self.stderr.write(\"\\n\")\n        self.stdout.write(schema_xml)\n\n    def write_file(self, filename, schema_xml):\n        with open(filename, \"w\") as schema_file:\n            schema_file.write(schema_xml)\n            os.fsync(schema_file.fileno())\n"
  },
  {
    "path": "haystack/management/commands/clear_index.py",
    "content": "from django.core.management.base import BaseCommand\n\nfrom haystack import connections\n\n\nclass Command(BaseCommand):\n    help = \"Clears out the search index completely.\"  # noqa A003\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"--noinput\",\n            action=\"store_false\",\n            dest=\"interactive\",\n            default=True,\n            help=\"If provided, no prompts will be issued to the user and the data will be wiped out.\",\n        )\n        parser.add_argument(\n            \"-u\",\n            \"--using\",\n            action=\"append\",\n            default=[],\n            help=\"Update only the named backend (can be used multiple times). \"\n            \"By default all backends will be updated.\",\n        )\n        parser.add_argument(\n            \"--nocommit\",\n            action=\"store_false\",\n            dest=\"commit\",\n            default=True,\n            help=\"Will pass commit=False to the backend.\",\n        )\n\n    def handle(self, **options):\n        \"\"\"Clears out the search index completely.\"\"\"\n        self.verbosity = int(options.get(\"verbosity\", 1))\n        self.commit = options.get(\"commit\", True)\n\n        using = options.get(\"using\")\n        if not using:\n            using = connections.connections_info.keys()\n\n        if options.get(\"interactive\", True):\n            self.stdout.write(\n                \"WARNING: This will irreparably remove EVERYTHING from your search index in connection '%s'.\"\n                % \"', '\".join(using)\n            )\n            self.stdout.write(\n                \"Your choices after this are to restore from backups or rebuild via the `rebuild_index` command.\"\n            )\n\n            yes_or_no = input(\"Are you sure you wish to continue? 
[y/N] \")\n\n            if not yes_or_no.lower().startswith(\"y\"):\n                self.stdout.write(\"No action taken.\")\n                return\n\n        if self.verbosity >= 1:\n            self.stdout.write(\n                \"Removing all documents from your index because you said so.\"\n            )\n\n        for backend_name in using:\n            backend = connections[backend_name].get_backend()\n            backend.clear(commit=self.commit)\n\n        if self.verbosity >= 1:\n            self.stdout.write(\"All documents removed.\")\n"
  },
  {
    "path": "haystack/management/commands/haystack_info.py",
    "content": "from django.core.management.base import BaseCommand\n\nfrom haystack import connections\nfrom haystack.constants import DEFAULT_ALIAS\n\n\nclass Command(BaseCommand):\n    help = \"Provides feedback about the current Haystack setup.\"  # noqa A003\n\n    def handle(self, **options):\n        \"\"\"Provides feedback about the current Haystack setup.\"\"\"\n\n        unified_index = connections[DEFAULT_ALIAS].get_unified_index()\n        indexed = unified_index.get_indexed_models()\n        index_count = len(indexed)\n        self.stdout.write(\"Number of handled %s index(es).\" % index_count)\n\n        for index in indexed:\n            self.stdout.write(\n                \"  - Model: %s by Index: %s\"\n                % (index.__name__, unified_index.get_indexes()[index])\n            )\n"
  },
  {
    "path": "haystack/management/commands/rebuild_index.py",
    "content": "from django.core.management import call_command\nfrom django.core.management.base import BaseCommand\n\nfrom .update_index import DEFAULT_MAX_RETRIES\n\n\nclass Command(BaseCommand):\n    help = \"Completely rebuilds the search index by removing the old data and then updating.\"  # noqa A003\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"--noinput\",\n            action=\"store_false\",\n            dest=\"interactive\",\n            default=True,\n            help=\"If provided, no prompts will be issued to the user and the data will be wiped out.\",\n        )\n        parser.add_argument(\n            \"-u\",\n            \"--using\",\n            action=\"append\",\n            default=[],\n            help=\"Update only the named backend (can be used multiple times). \"\n            \"By default all backends will be updated.\",\n        )\n        parser.add_argument(\n            \"-k\",\n            \"--workers\",\n            default=0,\n            type=int,\n            help=\"Allows for the use multiple workers to parallelize indexing. 
Requires multiprocessing.\",\n        )\n        parser.add_argument(\n            \"--nocommit\",\n            action=\"store_false\",\n            dest=\"commit\",\n            default=True,\n            help=\"Will pass commit=False to the backend.\",\n        )\n        parser.add_argument(\n            \"-b\",\n            \"--batch-size\",\n            dest=\"batchsize\",\n            type=int,\n            help=\"Number of items to index at once.\",\n        )\n        parser.add_argument(\n            \"-t\",\n            \"--max-retries\",\n            action=\"store\",\n            dest=\"max_retries\",\n            type=int,\n            default=DEFAULT_MAX_RETRIES,\n            help=\"Maximum number of attempts to write to the backend when an error occurs.\",\n        )\n\n    def handle(self, **options):\n        clear_options = options.copy()\n        update_options = options.copy()\n        for key in (\"batchsize\", \"workers\", \"max_retries\"):\n            del clear_options[key]\n        for key in (\"interactive\",):\n            del update_options[key]\n        call_command(\"clear_index\", **clear_options)\n        call_command(\"update_index\", **update_options)\n"
  },
  {
    "path": "haystack/management/commands/update_index.py",
    "content": "import logging\nimport multiprocessing\nimport os\nimport time\nfrom datetime import timedelta\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db import close_old_connections, reset_queries\nfrom django.utils.encoding import force_str, smart_bytes\nfrom django.utils.timezone import now\n\nfrom haystack import connections as haystack_connections\nfrom haystack.exceptions import NotHandled\nfrom haystack.query import SearchQuerySet\nfrom haystack.utils.app_loading import haystack_get_models, haystack_load_apps\n\nDEFAULT_BATCH_SIZE = None\nDEFAULT_AGE = None\nDEFAULT_MAX_RETRIES = 5\n\nLOG = multiprocessing.log_to_stderr(level=logging.WARNING)\n\n\ndef update_worker(args):\n    if len(args) != 10:\n        LOG.error(\"update_worker received incorrect arguments: %r\", args)\n        raise ValueError(\"update_worker received incorrect arguments\")\n\n    (\n        model,\n        start,\n        end,\n        total,\n        using,\n        start_date,\n        end_date,\n        verbosity,\n        commit,\n        max_retries,\n    ) = args\n\n    # FIXME: confirm that this is still relevant with modern versions of Django:\n    # We need to reset the connections, otherwise the different processes\n    # will try to share the connection, which causes things to blow up.\n    from django.db import connections\n\n    for alias, info in connections.databases.items():\n        # We need to also tread lightly with SQLite, because blindly wiping\n        # out connections (via ``... 
= {}``) destroys in-memory DBs.\n        if \"sqlite3\" not in info[\"ENGINE\"]:\n            try:\n                close_old_connections()\n                if isinstance(connections._connections, dict):\n                    del connections._connections[alias]\n                else:\n                    delattr(connections._connections, alias)\n            except KeyError:\n                pass\n\n    # Request that the connection clear out any transient sessions, file handles, etc.\n    haystack_connections[using].reset_sessions()\n\n    unified_index = haystack_connections[using].get_unified_index()\n    index = unified_index.get_index(model)\n    backend = haystack_connections[using].get_backend()\n\n    qs = index.build_queryset(using=using, start_date=start_date, end_date=end_date)\n    do_update(backend, index, qs, start, end, total, verbosity, commit, max_retries)\n    return args\n\n\ndef do_update(\n    backend,\n    index,\n    qs,\n    start,\n    end,\n    total,\n    verbosity=1,\n    commit=True,\n    max_retries=DEFAULT_MAX_RETRIES,\n    last_max_pk=None,\n):\n\n    # Get a clone of the QuerySet so that the cache doesn't bloat up\n    # in memory. 
Useful when reindexing large amounts of data.\n    # the query must be ordered by PK in order to get the max PK in each batch\n    small_cache_qs = qs.all().order_by(\"pk\")\n\n    # If we got the max seen PK from last batch, use it to restrict the qs\n    # to values above; this optimises the query for Postgres as not to\n    # devolve into multi-second run time at large offsets.\n    if last_max_pk is not None:\n        current_qs = small_cache_qs.filter(pk__gt=last_max_pk)[: end - start]\n    else:\n        current_qs = small_cache_qs[start:end]\n\n    # Remember maximum PK seen so far\n    max_pk = None\n    current_qs = list(current_qs)\n    if current_qs:\n        max_pk = current_qs[-1].pk\n\n    is_parent_process = hasattr(os, \"getppid\") and os.getpid() == os.getppid()\n\n    if verbosity >= 2:\n        if is_parent_process:\n            print(\"  indexed %s - %d of %d.\" % (start + 1, end, total))\n        else:\n            print(\n                \"  indexed %s - %d of %d (worker PID: %s).\"\n                % (start + 1, end, total, os.getpid())\n            )\n\n    retries = 0\n    while retries < max_retries:\n        try:\n            # FIXME: Get the right backend.\n            backend.update(index, current_qs, commit=commit)\n            if verbosity >= 2 and retries:\n                print(\n                    \"Completed indexing {} - {}, tried {}/{} times\".format(\n                        start + 1, end, retries + 1, max_retries\n                    )\n                )\n            break\n        except Exception as exc:\n            # Catch all exceptions which do not normally trigger a system exit, excluding SystemExit and\n            # KeyboardInterrupt. 
This avoids needing to import the backend-specific exception subclasses\n            # from pysolr, elasticsearch, whoosh, requests, etc.\n            retries += 1\n\n            error_context = {\n                \"start\": start + 1,\n                \"end\": end,\n                \"retries\": retries,\n                \"max_retries\": max_retries,\n                \"pid\": os.getpid(),\n                \"exc\": exc,\n            }\n\n            error_msg = \"Failed indexing %(start)s - %(end)s (retry %(retries)s/%(max_retries)s): %(exc)s\"\n            if not is_parent_process:\n                error_msg += \" (pid %(pid)s): %(exc)s\"\n\n            if retries >= max_retries:\n                LOG.error(error_msg, error_context, exc_info=True)\n                raise\n            elif verbosity >= 2:\n                LOG.warning(error_msg, error_context, exc_info=True)\n\n            # If going to try again, sleep a bit before\n            time.sleep(2 ** retries)\n\n    # Clear out the DB connections queries because it bloats up RAM.\n    reset_queries()\n    return max_pk\n\n\nclass Command(BaseCommand):\n    help = \"Freshens the index for the given app(s).\"  # noqa A003\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"app_label\",\n            nargs=\"*\",\n            help=\"App label of an application to update the search index.\",\n        )\n        parser.add_argument(\n            \"-m\",\n            \"--minutes\",\n            type=int,\n            help=\"Number of minutes back to consider objects new.\",\n        )\n        parser.add_argument(\n            \"-a\",\n            \"--age\",\n            type=int,\n            default=DEFAULT_AGE,\n            help=\"Number of hours back to consider objects new.\",\n        )\n        parser.add_argument(\n            \"-s\",\n            \"--start\",\n            dest=\"start_date\",\n            help=\"The start date for indexing. 
Can be any dateutil-parsable string;\"\n            \" YYYY-MM-DDTHH:MM:SS is recommended to avoid confusion\",\n        )\n        parser.add_argument(\n            \"-e\",\n            \"--end\",\n            dest=\"end_date\",\n            help=\"The end date for indexing. Can be any dateutil-parsable string;\"\n            \" YYYY-MM-DDTHH:MM:SS is recommended to avoid confusion\",\n        )\n        parser.add_argument(\n            \"-b\",\n            \"--batch-size\",\n            dest=\"batchsize\",\n            type=int,\n            help=\"Number of items to index at once.\",\n        )\n        parser.add_argument(\n            \"-r\",\n            \"--remove\",\n            action=\"store_true\",\n            default=False,\n            help=\"Remove objects from the index that are no longer present in the database.\",\n        )\n        parser.add_argument(\n            \"-u\",\n            \"--using\",\n            action=\"append\",\n            default=[],\n            help=\"Update only the named backend (can be used multiple times). 
\"\n            \"By default all backends will be updated.\",\n        )\n        parser.add_argument(\n            \"-k\",\n            \"--workers\",\n            type=int,\n            default=0,\n            help=\"Allows for the use multiple workers to parallelize indexing.\",\n        )\n        parser.add_argument(\n            \"--nocommit\",\n            action=\"store_false\",\n            dest=\"commit\",\n            default=True,\n            help=\"Will pass commit=False to the backend.\",\n        )\n        parser.add_argument(\n            \"-t\",\n            \"--max-retries\",\n            action=\"store\",\n            dest=\"max_retries\",\n            type=int,\n            default=DEFAULT_MAX_RETRIES,\n            help=\"Maximum number of attempts to write to the backend when an error occurs.\",\n        )\n\n    def handle(self, **options):\n        self.verbosity = int(options.get(\"verbosity\", 1))\n        self.batchsize = options.get(\"batchsize\", DEFAULT_BATCH_SIZE)\n        self.start_date = None\n        self.end_date = None\n        self.remove = options.get(\"remove\", False)\n        self.workers = options.get(\"workers\", 0)\n        self.commit = options.get(\"commit\", True)\n        self.max_retries = options.get(\"max_retries\", DEFAULT_MAX_RETRIES)\n\n        self.backends = options.get(\"using\")\n        if not self.backends:\n            self.backends = haystack_connections.connections_info.keys()\n\n        age = options.get(\"age\", DEFAULT_AGE)\n        minutes = options.get(\"minutes\", DEFAULT_AGE)\n        start_date = options.get(\"start_date\")\n        end_date = options.get(\"end_date\")\n\n        if self.verbosity > 2:\n            LOG.setLevel(logging.DEBUG)\n        elif self.verbosity > 1:\n            LOG.setLevel(logging.INFO)\n\n        if (minutes and age) or (minutes and start_date) or (age and start_date):\n            raise CommandError(\n                \"Minutes / age / start date options are 
mutually exclusive\"\n            )\n\n        if minutes is not None:\n            self.start_date = now() - timedelta(minutes=minutes)\n\n        if age is not None:\n            self.start_date = now() - timedelta(hours=age)\n\n        if start_date is not None:\n            from dateutil.parser import parse as dateutil_parse\n\n            try:\n                self.start_date = dateutil_parse(start_date)\n            except ValueError:\n                pass\n\n        if end_date is not None:\n            from dateutil.parser import parse as dateutil_parse\n\n            try:\n                self.end_date = dateutil_parse(end_date)\n            except ValueError:\n                pass\n\n        labels = options.get(\"app_label\") or haystack_load_apps()\n        for label in labels:\n            for using in self.backends:\n                try:\n                    self.update_backend(label, using)\n                except Exception:\n                    LOG.exception(\"Error updating %s using %s \", label, using)\n                    raise\n\n    def update_backend(self, label, using):\n        backend = haystack_connections[using].get_backend()\n        unified_index = haystack_connections[using].get_unified_index()\n\n        for model in haystack_get_models(label):\n            try:\n                index = unified_index.get_index(model)\n            except NotHandled:\n                if self.verbosity >= 2:\n                    self.stdout.write(\"Skipping '%s' - no index.\" % model)\n                continue\n\n            if self.workers > 0:\n                # workers resetting connections leads to references to models / connections getting\n                # stale and having their connection disconnected from under them. 
Resetting before\n                # the loop continues and it accesses the ORM makes it better.\n                close_old_connections()\n\n            qs = index.build_queryset(\n                using=using, start_date=self.start_date, end_date=self.end_date\n            )\n\n            total = qs.count()\n\n            if self.verbosity >= 1:\n                self.stdout.write(\n                    \"Indexing %d %s\"\n                    % (total, force_str(model._meta.verbose_name_plural))\n                )\n\n            batch_size = self.batchsize or backend.batch_size\n\n            if self.workers > 0:\n                ghetto_queue = []\n\n            max_pk = None\n            for start in range(0, total, batch_size):\n                end = min(start + batch_size, total)\n\n                if self.workers == 0:\n                    max_pk = do_update(\n                        backend,\n                        index,\n                        qs,\n                        start,\n                        end,\n                        total,\n                        verbosity=self.verbosity,\n                        commit=self.commit,\n                        max_retries=self.max_retries,\n                        last_max_pk=max_pk,\n                    )\n                else:\n                    ghetto_queue.append(\n                        (\n                            model,\n                            start,\n                            end,\n                            total,\n                            using,\n                            self.start_date,\n                            self.end_date,\n                            self.verbosity,\n                            self.commit,\n                            self.max_retries,\n                        )\n                    )\n\n            if self.workers > 0:\n                pool = multiprocessing.Pool(self.workers)\n\n                successful_tasks = pool.map(update_worker, 
ghetto_queue)\n\n                if len(ghetto_queue) != len(successful_tasks):\n                    self.stderr.write(\n                        \"Queued %d tasks but only %d completed\"\n                        % (len(ghetto_queue), len(successful_tasks))\n                    )\n                    for i in ghetto_queue:\n                        if i not in successful_tasks:\n                            self.stderr.write(\"Incomplete task: %s\" % repr(i))\n\n                pool.close()\n                pool.join()\n\n            if self.remove:\n                if self.start_date or self.end_date or total <= 0:\n                    # They're using a reduced set, which may not incorporate\n                    # all pks. Rebuild the list with everything.\n                    qs = index.index_queryset(using=using).values_list(\"pk\", flat=True)\n                    database_pks = {smart_bytes(pk) for pk in qs}\n                else:\n                    database_pks = {\n                        smart_bytes(pk) for pk in qs.values_list(\"pk\", flat=True)\n                    }\n\n                # Since records may still be in the search index but not the local database\n                # we'll use that to create batches for processing.\n                # See https://github.com/django-haystack/django-haystack/issues/1186\n                index_total = (\n                    SearchQuerySet(using=backend.connection_alias).models(model).count()\n                )\n\n                # Retrieve PKs from the index. Note that this cannot be a numeric range query because although\n                # pks are normally numeric they can be non-numeric UUIDs or other custom values. 
To reduce\n                # load on the search engine, we only retrieve the pk field, which will be checked against the\n                # full list obtained from the database, and the id field, which will be used to delete the\n                # record should it be found to be stale.\n                index_pks = SearchQuerySet(using=backend.connection_alias).models(model)\n                index_pks = index_pks.values_list(\"pk\", \"id\")\n\n                # We'll collect all of the record IDs which are no longer present in the database and delete\n                # them after walking the entire index. This uses more memory than the incremental approach but\n                # avoids needing the pagination logic below to account for both commit modes:\n                stale_records = set()\n\n                for start in range(0, index_total, batch_size):\n                    upper_bound = start + batch_size\n\n                    # If the database pk is no longer present, queue the index key for removal:\n                    for pk, rec_id in index_pks[start:upper_bound]:\n                        if smart_bytes(pk) not in database_pks:\n                            stale_records.add(rec_id)\n\n                if stale_records:\n                    if self.verbosity >= 1:\n                        self.stdout.write(\n                            \"  removing %d stale records.\" % len(stale_records)\n                        )\n\n                    for rec_id in stale_records:\n                        # Since the PK was not in the database list, we'll delete the record from the search\n                        # index:\n                        if self.verbosity >= 2:\n                            self.stdout.write(\"  removing %s.\" % rec_id)\n\n                        backend.remove(rec_id, commit=self.commit)\n"
  },
  {
    "path": "haystack/manager.py",
    "content": "from haystack.query import EmptySearchQuerySet, SearchQuerySet\n\n\nclass SearchIndexManager(object):\n    def __init__(self, using=None):\n        super().__init__()\n        self.using = using\n\n    def get_search_queryset(self):\n        \"\"\"Returns a new SearchQuerySet object.  Subclasses can override this method\n        to easily customize the behavior of the Manager.\n        \"\"\"\n        return SearchQuerySet(using=self.using)\n\n    def get_empty_query_set(self):\n        return EmptySearchQuerySet(using=self.using)\n\n    def all(self):  # noqa A003\n        return self.get_search_queryset()\n\n    def none(self):\n        return self.get_empty_query_set()\n\n    def filter(self, *args, **kwargs):  # noqa A003\n        return self.get_search_queryset().filter(*args, **kwargs)\n\n    def exclude(self, *args, **kwargs):\n        return self.get_search_queryset().exclude(*args, **kwargs)\n\n    def filter_and(self, *args, **kwargs):\n        return self.get_search_queryset().filter_and(*args, **kwargs)\n\n    def filter_or(self, *args, **kwargs):\n        return self.get_search_queryset().filter_or(*args, **kwargs)\n\n    def order_by(self, *args):\n        return self.get_search_queryset().order_by(*args)\n\n    def highlight(self):\n        return self.get_search_queryset().highlight()\n\n    def boost(self, term, boost):\n        return self.get_search_queryset().boost(term, boost)\n\n    def facet(self, field):\n        return self.get_search_queryset().facet(field)\n\n    def within(self, field, point_1, point_2):\n        return self.get_search_queryset().within(field, point_1, point_2)\n\n    def dwithin(self, field, point, distance):\n        return self.get_search_queryset().dwithin(field, point, distance)\n\n    def distance(self, field, point):\n        return self.get_search_queryset().distance(field, point)\n\n    def date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):\n        return 
self.get_search_queryset().date_facet(\n            field, start_date, end_date, gap_by, gap_amount=1\n        )\n\n    def query_facet(self, field, query):\n        return self.get_search_queryset().query_facet(field, query)\n\n    def narrow(self, query):\n        return self.get_search_queryset().narrow(query)\n\n    def raw_search(self, query_string, **kwargs):\n        return self.get_search_queryset().raw_search(query_string, **kwargs)\n\n    def load_all(self):\n        return self.get_search_queryset().load_all()\n\n    def auto_query(self, query_string, fieldname=\"content\"):\n        return self.get_search_queryset().auto_query(query_string, fieldname=fieldname)\n\n    def autocomplete(self, **kwargs):\n        return self.get_search_queryset().autocomplete(**kwargs)\n\n    def using(self, connection_name):\n        return self.get_search_queryset().using(connection_name)\n\n    def count(self):\n        return self.get_search_queryset().count()\n\n    def best_match(self):\n        return self.get_search_queryset().best_match()\n\n    def latest(self, date_field):\n        return self.get_search_queryset().latest(date_field)\n\n    def more_like_this(self, model_instance):\n        return self.get_search_queryset().more_like_this(model_instance)\n\n    def facet_counts(self):\n        return self.get_search_queryset().facet_counts()\n\n    def spelling_suggestion(self, preferred_query=None):\n        return self.get_search_queryset().spelling_suggestion(preferred_query=None)\n\n    def values(self, *fields):\n        return self.get_search_queryset().values(*fields)\n\n    def values_list(self, *fields, **kwargs):\n        return self.get_search_queryset().values_list(*fields, **kwargs)\n"
  },
  {
    "path": "haystack/models.py",
    "content": "# \"Hey, Django! Look at me, I'm an app! For Serious!\"\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils.encoding import force_str\nfrom django.utils.text import capfirst\n\nfrom haystack.constants import DEFAULT_ALIAS\nfrom haystack.exceptions import NotHandled, SpatialError\nfrom haystack.utils import log as logging\nfrom haystack.utils.app_loading import haystack_get_model\n\ntry:\n    from geopy import distance as geopy_distance\nexcept ImportError:\n    geopy_distance = None\n\n\n# Not a Django model, but tightly tied to them and there doesn't seem to be a\n# better spot in the tree.\nclass SearchResult:\n    \"\"\"\n    A single search result. The actual object is loaded lazily by accessing\n    object; until then this object only stores the model, pk, and score.\n\n    Note that iterating over SearchResults and getting the object for each\n    result will do O(N) database queries, which may not fit your needs for\n    performance.\n    \"\"\"\n\n    def __init__(self, app_label, model_name, pk, score, **kwargs):\n        self.app_label, self.model_name = app_label, model_name\n        self.pk = pk\n        self.score = score\n        self._object = None\n        self._model = None\n        self._verbose_name = None\n        self._additional_fields = []\n        self._point_of_origin = kwargs.pop(\"_point_of_origin\", None)\n        self._distance = kwargs.pop(\"_distance\", None)\n        self.stored_fields = None\n        self.log = self._get_log()\n\n        for key, value in kwargs.items():\n            if key not in self.__dict__:\n                self.__dict__[key] = value\n                self._additional_fields.append(key)\n\n    def _get_log(self):\n        return logging.getLogger(\"haystack\")\n\n    def __repr__(self):\n        return \"<SearchResult: %s.%s (pk=%r)>\" % (\n            self.app_label,\n            self.model_name,\n            self.pk,\n        )\n\n    def __str__(self):\n        return 
force_str(self.__repr__())\n\n    def __getattr__(self, attr):\n        if attr == \"__getnewargs__\":\n            raise AttributeError\n\n        return self.__dict__.get(attr, None)\n\n    def _get_searchindex(self):\n        from haystack import connections\n\n        return connections[DEFAULT_ALIAS].get_unified_index().get_index(self.model)\n\n    searchindex = property(_get_searchindex)\n\n    def _get_object(self):\n        if self._object is None:\n            if self.model is None:\n                self.log.error(\"Model could not be found for SearchResult '%s'.\", self)\n                return None\n\n            try:\n                try:\n                    self._object = self.searchindex.read_queryset().get(pk=self.pk)\n                except NotHandled:\n                    self.log.warning(\n                        \"Model '%s.%s' not handled by the routers.\",\n                        self.app_label,\n                        self.model_name,\n                    )\n                    # Revert to old behaviour\n                    self._object = self.model._default_manager.get(pk=self.pk)\n            except ObjectDoesNotExist:\n                self.log.error(\n                    \"Object could not be found in database for SearchResult '%s'.\", self\n                )\n                self._object = None\n\n        return self._object\n\n    def _set_object(self, obj):\n        self._object = obj\n\n    object = property(_get_object, _set_object)  # noqa A003\n\n    def _get_model(self):\n        if self._model is None:\n            try:\n                self._model = haystack_get_model(self.app_label, self.model_name)\n            except LookupError:\n                # this changed in change 1.7 to throw an error instead of\n                # returning None when the model isn't found. 
So catch the\n                # lookup error and keep self._model == None.\n                pass\n\n        return self._model\n\n    def _set_model(self, obj):\n        self._model = obj\n\n    model = property(_get_model, _set_model)\n\n    def _get_distance(self):\n        from django.contrib.gis.measure import Distance\n\n        if self._distance is None:\n            # We didn't get it from the backend & we haven't tried calculating\n            # it yet. Check if geopy is available to do it the \"slow\" way\n            # (even though slow meant 100 distance calculations in 0.004 seconds\n            # in my testing).\n            if geopy_distance is None:\n                raise SpatialError(\n                    \"The backend doesn't have 'DISTANCE_AVAILABLE' enabled & the 'geopy' library could not be imported, so distance information is not available.\"\n                )\n\n            if not self._point_of_origin:\n                raise SpatialError(\"The original point is not available.\")\n\n            if not hasattr(self, self._point_of_origin[\"field\"]):\n                raise SpatialError(\n                    \"The field '%s' was not included in search results, so the distance could not be calculated.\"\n                    % self._point_of_origin[\"field\"]\n                )\n\n            po_lng, po_lat = self._point_of_origin[\"point\"].coords\n            location_field = getattr(self, self._point_of_origin[\"field\"])\n\n            if location_field is None:\n                return None\n\n            lf_lng, lf_lat = location_field.coords\n            self._distance = Distance(\n                km=geopy_distance.distance((po_lat, po_lng), (lf_lat, lf_lng)).km\n            )\n\n        # We've either already calculated it or the backend returned it, so\n        # let's use that.\n        return self._distance\n\n    def _set_distance(self, dist):\n        self._distance = dist\n\n    distance = property(_get_distance, _set_distance)\n\n   
 def _get_verbose_name(self):\n        if self.model is None:\n            self.log.error(\"Model could not be found for SearchResult '%s'.\", self)\n            return \"\"\n\n        return force_str(capfirst(self.model._meta.verbose_name))\n\n    verbose_name = property(_get_verbose_name)\n\n    def _get_verbose_name_plural(self):\n        if self.model is None:\n            self.log.error(\"Model could not be found for SearchResult '%s'.\", self)\n            return \"\"\n\n        return force_str(capfirst(self.model._meta.verbose_name_plural))\n\n    verbose_name_plural = property(_get_verbose_name_plural)\n\n    def content_type(self):\n        \"\"\"Returns the content type for the result's model instance.\"\"\"\n        if self.model is None:\n            self.log.error(\"Model could not be found for SearchResult '%s'.\", self)\n            return \"\"\n\n        return str(self.model._meta)\n\n    def get_additional_fields(self):\n        \"\"\"\n        Returns a dictionary of all of the fields from the raw result.\n\n        Useful for serializing results. Only returns what was seen from the\n        search engine, so it may have extra fields Haystack's indexes aren't\n        aware of.\n        \"\"\"\n        additional_fields = {}\n\n        for fieldname in self._additional_fields:\n            additional_fields[fieldname] = getattr(self, fieldname)\n\n        return additional_fields\n\n    def get_stored_fields(self):\n        \"\"\"\n        Returns a dictionary of all of the stored fields from the SearchIndex.\n\n        Useful for serializing results. 
Only returns the fields Haystack's\n        indexes are aware of as being 'stored'.\n        \"\"\"\n        if self._stored_fields is None:\n            from haystack import connections\n\n            try:\n                index = (\n                    connections[DEFAULT_ALIAS].get_unified_index().get_index(self.model)\n                )\n            except NotHandled:\n                # Not found? Return nothing.\n                return {}\n\n            self._stored_fields = {}\n\n            # Iterate through the index's fields, pulling out the fields that\n            # are stored.\n            for fieldname, field in index.fields.items():\n                if field.stored is True:\n                    self._stored_fields[fieldname] = getattr(self, fieldname, \"\")\n\n        return self._stored_fields\n\n    def __getstate__(self):\n        \"\"\"\n        Returns a dictionary representing the ``SearchResult`` in order to\n        make it pickleable.\n        \"\"\"\n        # The ``log`` is excluded because, under the hood, ``logging`` uses\n        # ``threading.Lock``, which doesn't pickle well.\n        ret_dict = self.__dict__.copy()\n        del ret_dict[\"log\"]\n        return ret_dict\n\n    def __setstate__(self, data_dict):\n        \"\"\"\n        Updates the object's attributes according to data passed by pickle.\n        \"\"\"\n        self.__dict__.update(data_dict)\n        self.log = self._get_log()\n\n\ndef reload_indexes(sender, *args, **kwargs):\n    from haystack import connections\n\n    for conn in connections.all():\n        ui = conn.get_unified_index()\n        # Note: Unlike above, we're resetting the ``UnifiedIndex`` here.\n        # Thi gives us a clean slate.\n        ui.reset()\n"
  },
  {
    "path": "haystack/panels.py",
    "content": "from debug_toolbar.panels import DebugPanel\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\n\nfrom haystack import connections\n\n\nclass HaystackDebugPanel(DebugPanel):\n    \"\"\"\n    Panel that displays information about the Haystack queries run while\n    processing the request.\n    \"\"\"\n\n    name = \"Haystack\"\n    has_content = True\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._offset = {\n            alias: len(connections[alias].queries)\n            for alias in connections.connections_info.keys()\n        }\n        self._search_time = 0\n        self._queries = []\n        self._backends = {}\n\n    def nav_title(self):\n        return _(\"Haystack\")\n\n    def nav_subtitle(self):\n        self._queries = []\n        self._backends = {}\n\n        for alias in connections.connections_info.keys():\n            search_queries = connections[alias].queries[self._offset[alias] :]\n            self._backends[alias] = {\n                \"time_spent\": sum(float(q[\"time\"]) for q in search_queries),\n                \"queries\": len(search_queries),\n            }\n            self._queries.extend([(alias, q) for q in search_queries])\n\n        self._queries.sort(key=lambda x: x[1][\"start\"])\n        self._search_time = sum([d[\"time_spent\"] for d in self._backends.values()])\n        num_queries = len(self._queries)\n        return \"%d %s in %.2fms\" % (\n            num_queries,\n            (num_queries == 1) and \"query\" or \"queries\",\n            self._search_time,\n        )\n\n    def title(self):\n        return _(\"Search Queries\")\n\n    def url(self):\n        return \"\"\n\n    def content(self):\n        width_ratio_tally = 0\n\n        for alias, query in self._queries:\n            query[\"alias\"] = alias\n            query[\"query\"] = query[\"query_string\"]\n\n            if 
query.get(\"additional_kwargs\"):\n                if query[\"additional_kwargs\"].get(\"result_class\"):\n                    query[\"additional_kwargs\"][\"result_class\"] = str(\n                        query[\"additional_kwargs\"][\"result_class\"]\n                    )\n\n            try:\n                query[\"width_ratio\"] = (float(query[\"time\"]) / self._search_time) * 100\n            except ZeroDivisionError:\n                query[\"width_ratio\"] = 0\n\n            query[\"start_offset\"] = width_ratio_tally\n            width_ratio_tally += query[\"width_ratio\"]\n\n        context = self.context.copy()\n        context.update(\n            {\n                \"backends\": sorted(\n                    self._backends.items(), key=lambda x: -x[1][\"time_spent\"]\n                ),\n                \"queries\": [q for a, q in self._queries],\n                \"sql_time\": self._search_time,\n            }\n        )\n\n        return render_to_string(\"panels/haystack.html\", context)\n"
  },
  {
    "path": "haystack/query.py",
    "content": "import operator\nimport warnings\nfrom functools import reduce\n\nfrom haystack import connection_router, connections\nfrom haystack.backends import SQ\nfrom haystack.constants import DEFAULT_OPERATOR, ITERATOR_LOAD_PER_QUERY\nfrom haystack.exceptions import NotHandled\nfrom haystack.inputs import AutoQuery, Raw\nfrom haystack.utils import log as logging\n\n\nclass SearchQuerySet(object):\n    \"\"\"\n    Provides a way to specify search parameters and lazily load results.\n\n    Supports chaining (a la QuerySet) to narrow the search.\n    \"\"\"\n\n    def __init__(self, using=None, query=None):\n        # ``_using`` should only ever be a value other than ``None`` if it's\n        # been forced with the ``.using`` method.\n        self._using = using\n        self.query = None\n        self._determine_backend()\n\n        # If ``query`` is present, it should override even what the routers\n        # think.\n        if query is not None:\n            self.query = query\n\n        self._result_cache = []\n        self._result_count = None\n        self._cache_full = False\n        self._load_all = False\n        self._ignored_result_count = 0\n        self.log = logging.getLogger(\"haystack\")\n\n    def _determine_backend(self):\n        # A backend has been manually selected. 
Use it instead.\n        if self._using is not None:\n            self.query = connections[self._using].get_query()\n            return\n\n        # No backend, so rely on the routers to figure out what's right.\n        hints = {}\n\n        if self.query:\n            hints[\"models\"] = self.query.models\n\n        backend_alias = connection_router.for_read(**hints)\n\n        # The ``SearchQuery`` might swap itself out for a different variant\n        # here.\n        if self.query:\n            self.query = self.query.using(backend_alias)\n        else:\n            self.query = connections[backend_alias].get_query()\n\n    def __getstate__(self):\n        \"\"\"\n        For pickling.\n        \"\"\"\n        len(self)\n        obj_dict = self.__dict__.copy()\n        obj_dict[\"_iter\"] = None\n        obj_dict[\"log\"] = None\n        return obj_dict\n\n    def __setstate__(self, data_dict):\n        \"\"\"\n        For unpickling.\n        \"\"\"\n        self.__dict__ = data_dict\n        self.log = logging.getLogger(\"haystack\")\n\n    def __repr__(self):\n        return \"<SearchQuerySet: query=%r, using=%r>\" % (self.query, self._using)\n\n    def __len__(self):\n        if self._result_count is None:\n            self._result_count = self.query.get_count()\n\n            # Some backends give weird, false-y values here. Convert to zero.\n            if not self._result_count:\n                self._result_count = 0\n\n        # This needs to return the actual number of hits, not what's in the cache.\n        return self._result_count - self._ignored_result_count\n\n    def __iter__(self):\n        if self._cache_is_full():\n            # We've got a fully populated cache. 
Let Python do the hard work.\n            return iter(self._result_cache)\n\n        return self._manual_iter()\n\n    def __and__(self, other):\n        if isinstance(other, EmptySearchQuerySet):\n            return other._clone()\n        combined = self._clone()\n        combined.query.combine(other.query, SQ.AND)\n        return combined\n\n    def __or__(self, other):\n        combined = self._clone()\n        if isinstance(other, EmptySearchQuerySet):\n            return combined\n        combined.query.combine(other.query, SQ.OR)\n        return combined\n\n    def _cache_is_full(self):\n        if not self.query.has_run():\n            return False\n\n        if len(self) <= 0:\n            return True\n\n        try:\n            self._result_cache.index(None)\n            return False\n        except ValueError:\n            # No ``None``s found in the results. Check the length of the cache.\n            return len(self._result_cache) > 0\n\n    def _manual_iter(self):\n        # If we're here, our cache isn't fully populated.\n        # For efficiency, fill the cache as we go if we run out of results.\n        # Also, this can't be part of the __iter__ method due to Python's rules\n        # about generator functions.\n        current_position = 0\n        current_cache_max = 0\n\n        while True:\n            if len(self._result_cache) > 0:\n                try:\n                    current_cache_max = self._result_cache.index(None)\n                except ValueError:\n                    current_cache_max = len(self._result_cache)\n\n            while current_position < current_cache_max:\n                yield self._result_cache[current_position]\n                current_position += 1\n\n            if self._cache_is_full():\n                return\n\n            # We've run out of results and haven't hit our limit.\n            # Fill more of the cache.\n            if not self._fill_cache(\n                current_position, current_position + 
ITERATOR_LOAD_PER_QUERY\n            ):\n                return\n\n    def post_process_results(self, results):\n        to_cache = []\n\n        # Check if we wish to load all objects.\n        if self._load_all:\n            models_pks = {}\n            loaded_objects = {}\n\n            # Remember the search position for each result so we don't have to resort later.\n            for result in results:\n                models_pks.setdefault(result.model, []).append(result.pk)\n\n            # Load the objects for each model in turn.\n            for model in models_pks:\n                loaded_objects[model] = self._load_model_objects(\n                    model, models_pks[model]\n                )\n\n        for result in results:\n            if self._load_all:\n\n                model_objects = loaded_objects.get(result.model, {})\n                # Try to coerce a primary key object that matches the models pk\n                # We have to deal with semi-arbitrary keys being cast from strings (UUID, int, etc)\n                if model_objects:\n                    result_klass = type(next(iter(model_objects)))\n                    result.pk = result_klass(result.pk)\n\n                    try:\n                        result._object = model_objects[result.pk]\n                    except KeyError:\n                        # The object was either deleted since we indexed or should\n                        # be ignored for other reasons such as an overriden 'load_all_queryset';\n                        # fail silently.\n                        self._ignored_result_count += 1\n\n                        # avoid an unfilled None at the end of the result cache\n                        self._result_cache.pop()\n                        continue\n                else:\n                    # No objects were returned -- possible due to SQS nesting such as\n                    # XYZ.objects.filter(id__gt=10) where the amount ignored are\n                    # exactly 
equal to the ITERATOR_LOAD_PER_QUERY\n                    del self._result_cache[: len(results)]\n                    self._ignored_result_count += len(results)\n                    break\n\n            to_cache.append(result)\n\n        return to_cache\n\n    def _load_model_objects(self, model, pks):\n        try:\n            ui = connections[self.query._using].get_unified_index()\n            index = ui.get_index(model)\n            objects = index.read_queryset(using=self.query._using)\n            return objects.in_bulk(pks)\n        except NotHandled:\n            self.log.warning(\"Model '%s' not handled by the routers.\", model)\n            # Revert to old behaviour\n            return model._default_manager.in_bulk(pks)\n\n    def _fill_cache(self, start, end, **kwargs):\n        # Tell the query where to start from and how many we'd like.\n        self.query._reset()\n\n        if start is None:\n            start = 0\n\n        query_start = start\n        query_start += self._ignored_result_count\n        query_end = end\n        if query_end is not None:\n            query_end += self._ignored_result_count\n\n        self.query.set_limits(query_start, query_end)\n        results = self.query.get_results(**kwargs)\n\n        if results is None or len(results) == 0:\n            # trim missing stuff from the result cache\n            self._result_cache = self._result_cache[:start]\n            return False\n\n        # Setup the full cache now that we know how many results there are.\n        # We need the ``None``s as placeholders to know what parts of the\n        # cache we have/haven't filled.\n        # Using ``None`` like this takes up very little memory. 
In testing,\n        # an array of 100,000 ``None``s consumed less than .5 Mb, which ought\n        # to be an acceptable loss for consistent and more efficient caching.\n        if len(self._result_cache) == 0:\n            self._result_cache = [None] * self.query.get_count()\n\n        fill_start, fill_end = start, end\n        if fill_end is None:\n            fill_end = self.query.get_count()\n        cache_start = fill_start\n\n        while True:\n            to_cache = self.post_process_results(results)\n\n            # Assign by slice.\n            self._result_cache[cache_start : cache_start + len(to_cache)] = to_cache\n\n            if None in self._result_cache[start:end]:\n                fill_start = fill_end\n                fill_end += ITERATOR_LOAD_PER_QUERY\n                cache_start += len(to_cache)\n\n                # Tell the query where to start from and how many we'd like.\n                self.query._reset()\n                self.query.set_limits(fill_start, fill_end)\n                results = self.query.get_results()\n\n                if results is None or len(results) == 0:\n                    # No more results. Trim missing stuff from the result cache\n                    self._result_cache = self._result_cache[:cache_start]\n                    break\n            else:\n                break\n\n        return True\n\n    def __getitem__(self, k):\n        \"\"\"\n        Retrieves an item or slice from the set of results.\n        \"\"\"\n        if not isinstance(k, (slice, int)):\n            raise TypeError\n        assert (not isinstance(k, slice) and (k >= 0)) or (\n            isinstance(k, slice)\n            and (k.start is None or k.start >= 0)\n            and (k.stop is None or k.stop >= 0)\n        ), \"Negative indexing is not supported.\"\n\n        # Remember if it's a slice or not. 
We're going to treat everything as\n        # a slice to simply the logic and will `.pop()` at the end as needed.\n        if isinstance(k, slice):\n            is_slice = True\n            start = k.start\n\n            if k.stop is not None:\n                bound = int(k.stop)\n            else:\n                bound = None\n        else:\n            is_slice = False\n            start = k\n            bound = k + 1\n\n        # We need check to see if we need to populate more of the cache.\n        if len(self._result_cache) <= 0 or (\n            None in self._result_cache[start:bound] and not self._cache_is_full()\n        ):\n            try:\n                self._fill_cache(start, bound)\n            except StopIteration:\n                # There's nothing left, even though the bound is higher.\n                pass\n\n        # Cache should be full enough for our needs.\n        if is_slice:\n            return self._result_cache[start:bound]\n        else:\n            return self._result_cache[start]\n\n    # Methods that return a SearchQuerySet.\n    def all(self):  # noqa A003\n        \"\"\"Returns all results for the query.\"\"\"\n        return self._clone()\n\n    def none(self):\n        \"\"\"Returns an empty result list for the query.\"\"\"\n        return self._clone(klass=EmptySearchQuerySet)\n\n    def filter(self, *args, **kwargs):  # noqa A003\n        \"\"\"Narrows the search based on certain attributes and the default operator.\"\"\"\n        if DEFAULT_OPERATOR == \"OR\":\n            return self.filter_or(*args, **kwargs)\n        else:\n            return self.filter_and(*args, **kwargs)\n\n    def exclude(self, *args, **kwargs):\n        \"\"\"Narrows the search by ensuring certain attributes are not included.\"\"\"\n        clone = self._clone()\n        clone.query.add_filter(~SQ(*args, **kwargs))\n        return clone\n\n    def filter_and(self, *args, **kwargs):\n        \"\"\"Narrows the search by looking for (and including) 
certain attributes.\"\"\"\n        clone = self._clone()\n        clone.query.add_filter(SQ(*args, **kwargs))\n        return clone\n\n    def filter_or(self, *args, **kwargs):\n        \"\"\"Narrows the search by ensuring certain attributes are not included.\"\"\"\n        clone = self._clone()\n        clone.query.add_filter(SQ(*args, **kwargs), use_or=True)\n        return clone\n\n    def order_by(self, *args):\n        \"\"\"Alters the order in which the results should appear.\"\"\"\n        clone = self._clone()\n\n        for field in args:\n            clone.query.add_order_by(field)\n\n        return clone\n\n    def highlight(self, **kwargs):\n        \"\"\"Adds highlighting to the results.\"\"\"\n        clone = self._clone()\n        clone.query.add_highlight(**kwargs)\n        return clone\n\n    def models(self, *models):\n        \"\"\"Accepts an arbitrary number of Model classes to include in the search.\"\"\"\n        clone = self._clone()\n\n        for model in models:\n            if (\n                model\n                not in connections[self.query._using]\n                .get_unified_index()\n                .get_indexed_models()\n            ):\n                warnings.warn(\"The model %r is not registered for search.\" % (model,))\n\n            clone.query.add_model(model)\n\n        return clone\n\n    def result_class(self, klass):\n        \"\"\"\n        Allows specifying a different class to use for results.\n\n        Overrides any previous usages. 
If ``None`` is provided, Haystack will\n        revert back to the default ``SearchResult`` object.\n        \"\"\"\n        clone = self._clone()\n        clone.query.set_result_class(klass)\n        return clone\n\n    def boost(self, term, boost):\n        \"\"\"Boosts a certain aspect of the query.\"\"\"\n        clone = self._clone()\n        clone.query.add_boost(term, boost)\n        return clone\n\n    def facet(self, field, **options):\n        \"\"\"Adds faceting to a query for the provided field.\"\"\"\n        clone = self._clone()\n        clone.query.add_field_facet(field, **options)\n        return clone\n\n    def within(self, field, point_1, point_2):\n        \"\"\"Spatial: Adds a bounding box search to the query.\"\"\"\n        clone = self._clone()\n        clone.query.add_within(field, point_1, point_2)\n        return clone\n\n    def dwithin(self, field, point, distance):\n        \"\"\"Spatial: Adds a distance-based search to the query.\"\"\"\n        clone = self._clone()\n        clone.query.add_dwithin(field, point, distance)\n        return clone\n\n    def stats(self, field):\n        \"\"\"Adds stats to a query for the provided field.\"\"\"\n        return self.stats_facet(field, facet_fields=None)\n\n    def stats_facet(self, field, facet_fields=None):\n        \"\"\"Adds stats facet for the given field and facet_fields represents\n        the faceted fields.\"\"\"\n        clone = self._clone()\n        stats_facets = []\n        try:\n            stats_facets.append(sum(facet_fields, []))\n        except TypeError:\n            if facet_fields:\n                stats_facets.append(facet_fields)\n        clone.query.add_stats_query(field, stats_facets)\n        return clone\n\n    def distance(self, field, point):\n        \"\"\"\n        Spatial: Denotes results must have distance measurements from the\n        provided point.\n        \"\"\"\n        clone = self._clone()\n        clone.query.add_distance(field, point)\n        
return clone\n\n    def date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):\n        \"\"\"Adds faceting to a query for the provided field by date.\"\"\"\n        clone = self._clone()\n        clone.query.add_date_facet(\n            field, start_date, end_date, gap_by, gap_amount=gap_amount\n        )\n        return clone\n\n    def query_facet(self, field, query):\n        \"\"\"Adds faceting to a query for the provided field with a custom query.\"\"\"\n        clone = self._clone()\n        clone.query.add_query_facet(field, query)\n        return clone\n\n    def narrow(self, query):\n        \"\"\"Pushes existing facet choices into the search.\"\"\"\n\n        if isinstance(query, SQ):\n            # produce query string using empty query of the same class\n            empty_query = self.query._clone()\n            empty_query._reset()\n            query = query.as_query_string(empty_query.build_query_fragment)\n\n        clone = self._clone()\n        clone.query.add_narrow_query(query)\n        return clone\n\n    def raw_search(self, query_string, **kwargs):\n        \"\"\"Passes a raw query directly to the backend.\"\"\"\n        return self.filter(content=Raw(query_string, **kwargs))\n\n    def load_all(self):\n        \"\"\"Efficiently populates the objects in the search results.\"\"\"\n        clone = self._clone()\n        clone._load_all = True\n        return clone\n\n    def auto_query(self, query_string, fieldname=\"content\"):\n        \"\"\"\n        Performs a best guess constructing the search query.\n\n        This method is somewhat naive but works well enough for the simple,\n        common cases.\n        \"\"\"\n        kwargs = {fieldname: AutoQuery(query_string)}\n        return self.filter(**kwargs)\n\n    def autocomplete(self, **kwargs):\n        \"\"\"\n        A shortcut method to perform an autocomplete search.\n\n        Must be run against fields that are either ``NgramField`` or\n        ``EdgeNgramField``.\n  
      \"\"\"\n        clone = self._clone()\n        query_bits = []\n\n        for field_name, query in kwargs.items():\n            for word in query.split(\" \"):\n                bit = clone.query.clean(word.strip())\n                if bit:\n                    kwargs = {field_name: bit}\n                    query_bits.append(SQ(**kwargs))\n\n        return clone.filter(reduce(operator.__and__, query_bits))\n\n    def using(self, connection_name):\n        \"\"\"\n        Allows switching which connection the ``SearchQuerySet`` uses to\n        search in.\n        \"\"\"\n        clone = self._clone()\n        clone.query = self.query.using(connection_name)\n        clone._using = connection_name\n        return clone\n\n    # Methods that do not return a SearchQuerySet.\n\n    def count(self):\n        \"\"\"Returns the total number of matching results.\"\"\"\n        return len(self)\n\n    def best_match(self):\n        \"\"\"Returns the best/top search result that matches the query.\"\"\"\n        return self[0]\n\n    def latest(self, date_field):\n        \"\"\"Returns the most recent search result that matches the query.\"\"\"\n        clone = self._clone()\n        clone.query.clear_order_by()\n        clone.query.add_order_by(\"-%s\" % date_field)\n        return clone.best_match()\n\n    def more_like_this(self, model_instance):\n        \"\"\"Finds similar results to the object passed in.\"\"\"\n        clone = self._clone()\n        clone.query.more_like_this(model_instance)\n        return clone\n\n    def facet_counts(self):\n        \"\"\"\n        Returns the facet counts found by the query.\n\n        This will cause the query to execute and should generally be used when\n        presenting the data.\n        \"\"\"\n        if self.query.has_run():\n            return self.query.get_facet_counts()\n        else:\n            clone = self._clone()\n            return clone.query.get_facet_counts()\n\n    def stats_results(self):\n        
\"\"\"\n        Returns the stats results found by the query.\n        \"\"\"\n        if self.query.has_run():\n            return self.query.get_stats()\n        else:\n            clone = self._clone()\n            return clone.query.get_stats()\n\n    def set_spelling_query(self, spelling_query):\n        \"\"\"Set the exact text to be used to generate spelling suggestions\n\n        When making complicated queries, such as the alt parser mechanism\n        used by Solr dismax/edismax, this provides a convenient way to set\n        the a simple text string which will be used to generate spelling\n        suggestions without including unnecessary syntax.\n        \"\"\"\n        clone = self._clone()\n        clone.query.set_spelling_query(spelling_query)\n        return clone\n\n    def spelling_suggestion(self, preferred_query=None):\n        \"\"\"\n        Returns the spelling suggestion found by the query.\n\n        To work, you must set ``INCLUDE_SPELLING`` within your connection's\n        settings dictionary to ``True``. 
Otherwise, ``None`` will be returned.\n\n        This will cause the query to execute and should generally be used when\n        presenting the data.\n        \"\"\"\n        if self.query.has_run():\n            return self.query.get_spelling_suggestion(preferred_query)\n        else:\n            clone = self._clone()\n            return clone.query.get_spelling_suggestion(preferred_query)\n\n    def values(self, *fields):\n        \"\"\"\n        Returns a list of dictionaries, each containing the key/value pairs for\n        the result, exactly like Django's ``ValuesQuerySet``.\n        \"\"\"\n        qs = self._clone(klass=ValuesSearchQuerySet)\n        qs._fields.extend(fields)\n        return qs\n\n    def values_list(self, *fields, **kwargs):\n        \"\"\"\n        Returns a list of field values as tuples, exactly like Django's\n        ``QuerySet.values``.\n\n        Optionally accepts a ``flat=True`` kwarg, which in the case of a\n        single field being provided, will return a flat list of that field\n        rather than a list of tuples.\n        \"\"\"\n        flat = kwargs.pop(\"flat\", False)\n\n        if flat and len(fields) > 1:\n            raise TypeError(\n                \"'flat' is not valid when values_list is called with more than one field.\"\n            )\n\n        qs = self._clone(klass=ValuesListSearchQuerySet)\n        qs._fields.extend(fields)\n        qs._flat = flat\n        return qs\n\n    # Utility methods.\n\n    def _clone(self, klass=None):\n        if klass is None:\n            klass = self.__class__\n\n        query = self.query._clone()\n        clone = klass(query=query)\n        clone._load_all = self._load_all\n        return clone\n\n\nclass EmptySearchQuerySet(SearchQuerySet):\n    \"\"\"\n    A stubbed SearchQuerySet that behaves as normal but always returns no\n    results.\n    \"\"\"\n\n    def __len__(self):\n        return 0\n\n    def _cache_is_full(self):\n        # Pretend the cache is always full 
with no results.\n        return True\n\n    def _clone(self, klass=None):\n        clone = super()._clone(klass=klass)\n        clone._result_cache = []\n        return clone\n\n    def _fill_cache(self, start, end):\n        return False\n\n    def facet_counts(self):\n        return {}\n\n\nclass ValuesListSearchQuerySet(SearchQuerySet):\n    \"\"\"\n    A ``SearchQuerySet`` which returns a list of field values as tuples, exactly\n    like Django's ``ValuesListQuerySet``.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._flat = False\n        self._fields = []\n\n        # Removing this dependency would require refactoring much of the backend\n        # code (_process_results, etc.) and these aren't large enough to make it\n        # an immediate priority:\n        self._internal_fields = [\"id\", \"django_ct\", \"django_id\", \"score\"]\n\n    def _clone(self, klass=None):\n        clone = super()._clone(klass=klass)\n        clone._fields = self._fields\n        clone._flat = self._flat\n        return clone\n\n    def _fill_cache(self, start, end):\n        query_fields = set(self._internal_fields)\n        query_fields.update(self._fields)\n        kwargs = {\"fields\": query_fields}\n        return super()._fill_cache(start, end, **kwargs)\n\n    def post_process_results(self, results):\n        to_cache = []\n\n        if self._flat:\n            accum = to_cache.extend\n        else:\n            accum = to_cache.append\n\n        for result in results:\n            accum([getattr(result, i, None) for i in self._fields])\n\n        return to_cache\n\n\nclass ValuesSearchQuerySet(ValuesListSearchQuerySet):\n    \"\"\"\n    A ``SearchQuerySet`` which returns a list of dictionaries, each containing\n    the key/value pairs for the result, exactly like Django's\n    ``ValuesQuerySet``.\n    \"\"\"\n\n    def _fill_cache(self, start, end):\n        query_fields = set(self._internal_fields)\n       
 query_fields.update(self._fields)\n        kwargs = {\"fields\": query_fields}\n        return super(ValuesListSearchQuerySet, self)._fill_cache(start, end, **kwargs)\n\n    def post_process_results(self, results):\n        to_cache = []\n\n        for result in results:\n            to_cache.append({i: getattr(result, i, None) for i in self._fields})\n\n        return to_cache\n\n\nclass RelatedSearchQuerySet(SearchQuerySet):\n    \"\"\"\n    A variant of the SearchQuerySet that can handle `load_all_queryset`s.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._load_all_querysets = {}\n        self._result_cache = []\n\n    def _load_model_objects(self, model, pks):\n        if model in self._load_all_querysets:\n            # Use the overriding queryset.\n            return self._load_all_querysets[model].in_bulk(pks)\n        else:\n            # Check the SearchIndex for the model for an override.\n\n            try:\n                ui = connections[self.query._using].get_unified_index()\n                index = ui.get_index(model)\n                qs = index.load_all_queryset()\n                return qs.in_bulk(pks)\n            except NotHandled:\n                # The model returned doesn't seem to be handled by the\n                # routers. 
We should silently fail and populate\n                # nothing for those objects.\n                return {}\n\n    def load_all_queryset(self, model, queryset):\n        \"\"\"\n        Allows for specifying a custom ``QuerySet`` that changes how ``load_all``\n        will fetch records for the provided model.\n\n        This is useful for post-processing the results from the query, enabling\n        things like adding ``select_related`` or filtering certain data.\n        \"\"\"\n        clone = self._clone()\n        clone._load_all_querysets[model] = queryset\n        return clone\n\n    def _clone(self, klass=None):\n        clone = super()._clone(klass=klass)\n        clone._load_all_querysets = self._load_all_querysets\n        return clone\n"
  },
  {
    "path": "haystack/routers.py",
    "content": "from haystack.constants import DEFAULT_ALIAS\n\n\nclass BaseRouter(object):\n    # Reserved for future extension.\n    pass\n\n\nclass DefaultRouter(BaseRouter):\n    def for_read(self, **hints):\n        return DEFAULT_ALIAS\n\n    def for_write(self, **hints):\n        return DEFAULT_ALIAS\n"
  },
  {
    "path": "haystack/signals.py",
    "content": "from django.db import models\n\nfrom haystack.exceptions import NotHandled\n\n\nclass BaseSignalProcessor(object):\n    \"\"\"\n    A convenient way to attach Haystack to Django's signals & cause things to\n    index.\n\n    By default, does nothing with signals but provides underlying functionality.\n    \"\"\"\n\n    def __init__(self, connections, connection_router):\n        self.connections = connections\n        self.connection_router = connection_router\n        self.setup()\n\n    def setup(self):\n        \"\"\"\n        A hook for setting up anything necessary for\n        ``handle_save/handle_delete`` to be executed.\n\n        Default behavior is to do nothing (``pass``).\n        \"\"\"\n        # Do nothing.\n        pass\n\n    def teardown(self):\n        \"\"\"\n        A hook for tearing down anything necessary for\n        ``handle_save/handle_delete`` to no longer be executed.\n\n        Default behavior is to do nothing (``pass``).\n        \"\"\"\n        # Do nothing.\n        pass\n\n    def handle_save(self, sender, instance, **kwargs):\n        \"\"\"\n        Given an individual model instance, determine which backends the\n        update should be sent to & update the object on those backends.\n        \"\"\"\n        using_backends = self.connection_router.for_write(instance=instance)\n\n        for using in using_backends:\n            try:\n                index = self.connections[using].get_unified_index().get_index(sender)\n                index.update_object(instance, using=using)\n            except NotHandled:\n                # TODO: Maybe log it or let the exception bubble?\n                pass\n\n    def handle_delete(self, sender, instance, **kwargs):\n        \"\"\"\n        Given an individual model instance, determine which backends the\n        delete should be sent to & delete the object on those backends.\n        \"\"\"\n        using_backends = self.connection_router.for_write(instance=instance)\n\n   
     for using in using_backends:\n            try:\n                index = self.connections[using].get_unified_index().get_index(sender)\n                index.remove_object(instance, using=using)\n            except NotHandled:\n                # TODO: Maybe log it or let the exception bubble?\n                pass\n\n\nclass RealtimeSignalProcessor(BaseSignalProcessor):\n    \"\"\"\n    Allows for observing when saves/deletes fire & automatically updates the\n    search engine appropriately.\n    \"\"\"\n\n    def setup(self):\n        # Naive (listen to all model saves).\n        models.signals.post_save.connect(self.handle_save)\n        models.signals.post_delete.connect(self.handle_delete)\n        # Efficient would be going through all backends & collecting all models\n        # being used, then hooking up signals only for those.\n\n    def teardown(self):\n        # Naive (listen to all model saves).\n        models.signals.post_save.disconnect(self.handle_save)\n        models.signals.post_delete.disconnect(self.handle_delete)\n        # Efficient would be going through all backends & collecting all models\n        # being used, then disconnecting signals only for those.\n"
  },
  {
    "path": "haystack/templates/panels/haystack.html",
    "content": "{% load i18n %}\n<table>\n    <thead>\n        <tr>\n            <th style=\"width: 50%\">{% trans 'Query' %}</th>\n            <th style=\"width: 10%\">{% trans 'Backend Alias' %}</th>\n            <th style=\"width: 25%\">{% trans 'Timeline' %}</th>\n            <th style=\"width: 5%\">{% trans 'Time' %}&nbsp;(ms)</th>\n            <th style=\"width: 10%\">{% trans 'Kwargs' %}</th>\n        </tr>\n    </thead>\n    <tbody>\n        {% for query in queries %}\n            <tr class=\"{% cycle 'djDebugOdd' 'djDebugEven' %}\">\n                <td class=\"syntax\">\n                    <div class=\"djDebugSqlWrap\">\n                        <div class=\"djDebugSql\">{{ query.query_string|safe }}</div>\n                    </div>\n                </td>\n                <td>{{ query.alias }}</td>\n                <td>\n                    <span class=\"djDebugLineChart{% if query.is_slow %} djDebugLineChartWarning{% endif %}\" style=\"width:{{ query.width_ratio }}%; left:{{ query.start_offset }}%; position: relative;\">&nbsp;</span>\n                </td>\n                <td>{{ query.time }}</td>\n                <td>\n                {% for key, value in query.additional_kwargs.items %}\n                    <strong>'{{ key }}':</strong> {{ value|stringformat:\"r\" }}<br>\n                {% endfor %}\n                </td>\n            </tr>\n        {% endfor %}\n    </tbody>\n</table>\n"
  },
  {
    "path": "haystack/templates/search_configuration/schema.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<!--\n This is the Solr schema file. This file should be named \"schema.xml\" and\n should be in the conf directory under the solr home\n (i.e. ./solr/conf/schema.xml by default)\n or located where the classloader for the Solr webapp can find it.\n\n This example schema is the recommended starting point for users.\n It should be kept correct and concise, usable out-of-the-box.\n\n For more information, on how to customize this file, please see\n http://wiki.apache.org/solr/SchemaXml\n\n PERFORMANCE NOTE: this schema includes many optional features and should not\n be used for benchmarking.  
To improve performance one could\n  - set stored=\"false\" for all fields possible (esp large fields) when you\n    only need to search on the field but don't need to return the original\n    value.\n  - set indexed=\"false\" if you don't need to search on the field, but only\n    return the field as a result of searching on other indexed fields.\n  - remove all unneeded copyField statements\n  - for best index size and searching performance, set \"index\" to false\n    for all general text fields, use copyField to copy them to the\n    catchall \"text\" field, and use that for searching.\n  - For maximum indexing performance, use the ConcurrentUpdateSolrServer\n    java client.\n  - Remember to run the JVM in server mode, and use a higher logging level\n    that avoids logging every request\n-->\n\n<schema name=\"haystack-schema\" version=\"1.6\">\n\n    <!--\n    ######################## django-haystack specifics begin ########################\n    -->\n\n    <fieldType name=\"edge_ngram\" class=\"solr.TextField\" positionIncrementGap=\"1\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\" />\n            <filter class=\"solr.LowerCaseFilterFactory\" />\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"1\" generateNumberParts=\"1\" catenateWords=\"0\" catenateNumbers=\"0\" catenateAll=\"0\" splitOnCaseChange=\"1\"/>\n            <filter class=\"solr.EdgeNGramFilterFactory\" minGramSize=\"2\" maxGramSize=\"15\" />\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\" />\n            <filter class=\"solr.LowerCaseFilterFactory\" />\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"1\" generateNumberParts=\"1\" catenateWords=\"0\" catenateNumbers=\"0\" catenateAll=\"0\" splitOnCaseChange=\"1\"/>\n        </analyzer>\n    </fieldType>\n\n    <fieldType name=\"ngram\" 
class=\"solr.TextField\" >\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.KeywordTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.NGramFilterFactory\" minGramSize=\"3\" maxGramSize=\"15\" />\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.KeywordTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <field name=\"{{ ID }}\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\" required=\"true\"/>\n    <field name=\"{{ DJANGO_CT }}\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"{{ DJANGO_ID }}\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    {% for field in fields %}\n    <field name=\"{{ field.field_name }}\" type=\"{{ field.type }}\" indexed=\"{{ field.indexed }}\" stored=\"{{ field.stored }}\" multiValued=\"{{ field.multi_valued }}\" />\n    {% endfor %}\n    <uniqueKey>{{ ID }}</uniqueKey>\n\n    <!--\n    ######################## django-haystack specifics end ########################\n    -->\n\n    <!-- attribute \"name\" is the name of this schema and is only used for display purposes.\n       version=\"x.y\" is Solr's version number for the schema syntax and\n       semantics.  It should not normally be changed by applications.\n\n       1.0: multiValued attribute did not exist, all fields are multiValued\n            by nature\n       1.1: multiValued attribute introduced, false by default\n       1.2: omitTermFreqAndPositions attribute introduced, true by default\n            except for text fields.\n       1.3: removed optional field compress feature\n       1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser\n            behavior when a single string produces multiple tokens.  
Defaults\n            to off for version >= 1.4\n       1.5: omitNorms defaults to true for primitive field types\n            (int, float, boolean, string...)\n       1.6: useDocValuesAsStored defaults to true.\n    -->\n\n    <!-- Valid attributes for fields:\n     name: mandatory - the name for the field\n     type: mandatory - the name of a field type from the\n       fieldTypes section\n     indexed: true if this field should be indexed (searchable or sortable)\n     stored: true if this field should be retrievable\n     docValues: true if this field should have doc values. Doc values are\n       useful for faceting, grouping, sorting and function queries. Although not\n       required, doc values will make the index faster to load, more\n       NRT-friendly and more memory-efficient. They however come with some\n       limitations: they are currently only supported by StrField, UUIDField\n       and all Trie*Fields, and depending on the field type, they might\n       require the field to be single-valued, be required or have a default\n       value (check the documentation of the field type you're interested in\n       for more information)\n     multiValued: true if this field may contain multiple values per document\n     omitNorms: (expert) set to true to omit the norms associated with\n       this field (this disables length normalization and index-time\n       boosting for the field, and saves some memory).  Only full-text\n       fields or fields that need an index-time boost need norms.\n       Norms are omitted for primitive (non-analyzed) types by default.\n     termVectors: [false] set to true to store the term vector for a\n       given field.\n       When using MoreLikeThis, fields used for similarity should be\n       stored for best performance.\n     termPositions: Store position information with the term vector.\n       This will increase storage costs.\n     termOffsets: Store offset information with the term vector. 
This\n       will increase storage costs.\n     required: The field is required.  It will throw an error if the\n       value does not exist\n     default: a value that should be used if no value is specified\n       when adding a document.\n    -->\n\n    <!-- field names should consist of alphanumeric or underscore characters only and\n      not start with a digit.  This is not currently strictly enforced,\n      but other field names will not have first class support from all components\n      and back compatibility is not guaranteed.  Names with both leading and\n      trailing underscores (e.g. _version_) are reserved.\n    -->\n\n    <!-- In this data_driven_schema_configs configset, only three fields are pre-declared:\n         id, _version_, and _text_.  All other fields will be type guessed and added via the\n         \"add-unknown-fields-to-the-schema\" update request processor chain declared\n         in solrconfig.xml.\n\n         Note that many dynamic fields are also defined - you can use them to specify a\n         field's type via field naming conventions - see below.\n\n         WARNING: The _text_ catch-all field will significantly increase your index size.\n         If you don't need it, consider removing it and the corresponding copyField directive.\n    -->\n\n    <field name=\"_version_\" type=\"long\" indexed=\"true\" stored=\"false\"/>\n    <field name=\"_root_\" type=\"string\" indexed=\"true\" stored=\"false\" docValues=\"false\" />\n    <!--<field name=\"_text_\" type=\"edge_ngram\" indexed=\"true\" stored=\"false\" multiValued=\"true\"/>\n    <copyField source=\"*\" dest=\"_text_\"/>-->\n\n\n    <!-- Dynamic field definitions allow using convention over configuration\n       for fields via the specification of patterns to match field names.\n       EXAMPLE:  name=\"*_i\" will match any field ending in _i (like myid_i, z_i)\n       RESTRICTION: the glob-like pattern in the name attribute must have\n       a \"*\" only at the start or the 
end.  -->\n\n    <dynamicField name=\"*_i\"  type=\"int\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_is\" type=\"ints\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_s\"  type=\"string\"  indexed=\"true\"  stored=\"true\" />\n    <dynamicField name=\"*_ss\" type=\"strings\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_l\"  type=\"long\"   indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_ls\" type=\"longs\"   indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_t\"   type=\"text_general\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_txt\" type=\"text_general\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_b\"  type=\"boolean\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_bs\" type=\"booleans\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_f\"  type=\"float\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_fs\" type=\"floats\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_d\"  type=\"double\" indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_ds\" type=\"doubles\" indexed=\"true\"  stored=\"true\"/>\n\n    <!-- Type used to index the lat and lon components for the \"location\" FieldType -->\n    <dynamicField name=\"*_coordinate\"  type=\"tdouble\" indexed=\"true\"  stored=\"false\" useDocValuesAsStored=\"false\" />\n\n    <dynamicField name=\"*_dt\"  type=\"date\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_dts\" type=\"date\"    indexed=\"true\"  stored=\"true\" multiValued=\"true\"/>\n    <dynamicField name=\"*_p\"  type=\"location\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_srpt\"  type=\"location_rpt\" indexed=\"true\" stored=\"true\"/>\n\n    <!-- some trie-coded dynamic fields for faster range queries -->\n    <dynamicField name=\"*_ti\" type=\"tint\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField 
name=\"*_tis\" type=\"tints\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tl\" type=\"tlong\"   indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tls\" type=\"tlongs\"   indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tf\" type=\"tfloat\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tfs\" type=\"tfloats\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_td\" type=\"tdouble\" indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tds\" type=\"tdoubles\" indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tdt\" type=\"tdate\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tdts\" type=\"tdates\"  indexed=\"true\"  stored=\"true\"/>\n\n    <dynamicField name=\"*_c\"   type=\"currency\" indexed=\"true\"  stored=\"true\"/>\n\n    <dynamicField name=\"ignored_*\" type=\"ignored\" multiValued=\"true\"/>\n    <dynamicField name=\"attr_*\" type=\"text_general\" indexed=\"true\" stored=\"true\" multiValued=\"true\"/>\n\n    <dynamicField name=\"random_*\" type=\"random\" />\n\n    <!-- uncomment the following to ignore any fields that don't already match an existing\n        field name or dynamic field, rather than reporting them as an error.\n        alternately, change the type=\"ignored\" to some other type e.g. \"text\" if you want\n        unknown fields indexed and/or stored by default\n\n        NB: use of \"*\" dynamic fields will disable field type guessing and adding\n        unknown fields to the schema. -->\n    <!--dynamicField name=\"*\" type=\"ignored\" multiValued=\"true\" /-->\n\n    <!-- Field to use to determine and enforce document uniqueness.\n      Unless this field is marked with required=\"false\", it will be a required field\n    -->\n    <uniqueKey>id</uniqueKey>\n\n    <!-- copyField commands copy one field to another at the time a document\n       is added to the index.  
It's used either to index the same field differently,\n       or to add multiple fields to the same field for easier/faster searching.\n\n    <copyField source=\"sourceFieldName\" dest=\"destinationFieldName\"/>\n    -->\n\n    <!-- field type definitions. The \"name\" attribute is\n       just a label to be used by field definitions.  The \"class\"\n       attribute and any other attributes determine the real\n       behavior of the fieldType.\n         Class names starting with \"solr\" refer to java classes in a\n       standard package such as org.apache.solr.analysis\n    -->\n\n    <!-- The StrField type is not analyzed, but indexed/stored verbatim.\n       It supports doc values but in that case the field needs to be\n       single-valued and either required or have a default value.\n      -->\n    <fieldType name=\"string\" class=\"solr.StrField\" sortMissingLast=\"true\" docValues=\"true\" />\n    <fieldType name=\"strings\" class=\"solr.StrField\" sortMissingLast=\"true\" multiValued=\"true\" docValues=\"true\" />\n\n    <!-- boolean type: \"true\" or \"false\" -->\n    <fieldType name=\"boolean\" class=\"solr.BoolField\" sortMissingLast=\"true\"/>\n\n    <fieldType name=\"booleans\" class=\"solr.BoolField\" sortMissingLast=\"true\" multiValued=\"true\"/>\n\n    <!-- sortMissingLast and sortMissingFirst attributes are optional attributes are\n         currently supported on types that are sorted internally as strings\n         and on numeric types.\n\t     This includes \"string\",\"boolean\", and, as of 3.5 (and 4.x),\n\t     int, float, long, date, double, including the \"Trie\" variants.\n       - If sortMissingLast=\"true\", then a sort on this field will cause documents\n         without the field to come after documents with the field,\n         regardless of the requested sort order (asc or desc).\n       - If sortMissingFirst=\"true\", then a sort on this field will cause documents\n         without the field to come before documents with the 
field,\n         regardless of the requested sort order.\n       - If sortMissingLast=\"false\" and sortMissingFirst=\"false\" (the default),\n         then default lucene sorting will be used which places docs without the\n         field first in an ascending sort and last in a descending sort.\n    -->\n\n    <!--\n      Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.\n\n      These fields support doc values, but they require the field to be\n      single-valued and either be required or have a default value.\n    -->\n    <fieldType name=\"int\" class=\"solr.TrieIntField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"float\" class=\"solr.TrieFloatField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"long\" class=\"solr.TrieLongField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"double\" class=\"solr.TrieDoubleField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n\n    <fieldType name=\"ints\" class=\"solr.TrieIntField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"floats\" class=\"solr.TrieFloatField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"longs\" class=\"solr.TrieLongField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"doubles\" class=\"solr.TrieDoubleField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n\n    <!--\n     Numeric field types that index each value at various levels of precision\n     to accelerate range queries when the number of values between the range\n     endpoints is large. 
See the javadoc for NumericRangeQuery for internal\n     implementation details.\n\n     Smaller precisionStep values (specified in bits) will lead to more tokens\n     indexed per value, slightly larger index size, and faster range queries.\n     A precisionStep of 0 disables indexing at different precision levels.\n    -->\n    <fieldType name=\"tint\" class=\"solr.TrieIntField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"tfloat\" class=\"solr.TrieFloatField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"tlong\" class=\"solr.TrieLongField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"tdouble\" class=\"solr.TrieDoubleField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\"/>\n\n    <fieldType name=\"tints\" class=\"solr.TrieIntField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"tfloats\" class=\"solr.TrieFloatField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"tlongs\" class=\"solr.TrieLongField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"tdoubles\" class=\"solr.TrieDoubleField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n\n    <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and\n         is a more restricted form of the canonical representation of dateTime\n         http://www.w3.org/TR/xmlschema-2/#dateTime\n         The trailing \"Z\" designates UTC time and is mandatory.\n         Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z\n         All other components are mandatory.\n\n         Expressions can also be used to denote calculations that should be\n         performed relative to \"NOW\" to determine the value, 
ie...\n\n               NOW/HOUR\n                  ... Round to the start of the current hour\n               NOW-1DAY\n                  ... Exactly 1 day prior to now\n               NOW/DAY+6MONTHS+3DAYS\n                  ... 6 months and 3 days in the future from the start of\n                      the current day\n\n         Consult the TrieDateField javadocs for more information.\n\n         Note: For faster range queries, consider the tdate type\n      -->\n    <fieldType name=\"date\" class=\"solr.TrieDateField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"dates\" class=\"solr.TrieDateField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n\n    <!-- A Trie based date field for faster date range queries and date faceting. -->\n    <fieldType name=\"tdate\" class=\"solr.TrieDateField\" docValues=\"true\" precisionStep=\"6\" positionIncrementGap=\"0\"/>\n\n    <fieldType name=\"tdates\" class=\"solr.TrieDateField\" docValues=\"true\" precisionStep=\"6\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n\n\n    <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings -->\n    <fieldType name=\"binary\" class=\"solr.BinaryField\"/>\n\n    <!-- The \"RandomSortField\" is not used to store or search any\n         data.  You can declare fields of this type it in your schema\n         to generate pseudo-random orderings of your docs for sorting\n         or function purposes.  The ordering is generated based on the field\n         name and the version of the index. 
As long as the index version\n         remains unchanged, and the same field name is reused,\n         the ordering of the docs will be consistent.\n         If you want different psuedo-random orderings of documents,\n         for the same version of the index, use a dynamicField and\n         change the field name in the request.\n     -->\n    <fieldType name=\"random\" class=\"solr.RandomSortField\" indexed=\"true\" />\n\n    <!-- solr.TextField allows the specification of custom text analyzers\n         specified as a tokenizer and a list of token filters. Different\n         analyzers may be specified for indexing and querying.\n\n         The optional positionIncrementGap puts space between multiple fields of\n         this type on the same document, with the purpose of preventing false phrase\n         matching across fields.\n\n         For more info on customizing your analyzer chain, please see\n         http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters\n     -->\n\n    <!-- One can also specify an existing Analyzer class that has a\n         default constructor via the class attribute on the analyzer element.\n         Example:\n    <fieldType name=\"text_greek\" class=\"solr.TextField\">\n      <analyzer class=\"org.apache.lucene.analysis.el.GreekAnalyzer\"/>\n    </fieldType>\n    -->\n\n    <!-- A text field that only splits on whitespace for exact matching of words -->\n    <dynamicField name=\"*_ws\" type=\"text_ws\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ws\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- A general text field that has reasonable, generic\n         cross-language defaults: it tokenizes with StandardTokenizer,\n\t       removes stop words from case-insensitive \"stopwords.txt\"\n\t       (empty by default), and down cases.  
At query time only, it\n\t       also applies synonyms.\n\t  -->\n    <fieldType name=\"text_general\" class=\"solr.TextField\" positionIncrementGap=\"100\" multiValued=\"true\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"stopwords.txt\" />\n            <!-- in this example, we will only use synonyms at query time\n            <filter class=\"solr.SynonymFilterFactory\" synonyms=\"index_synonyms.txt\" ignoreCase=\"true\" expand=\"false\"/>\n            -->\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"stopwords.txt\" />\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"true\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- A text field with defaults appropriate for English: it\n         tokenizes with StandardTokenizer, removes English stop words\n         (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and\n         finally applies Porter's stemming.  The query time analyzer\n         also applies synonyms from synonyms.txt. 
-->\n    <dynamicField name=\"*_txt_en\" type=\"text_en\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_en\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- in this example, we will only use synonyms at query time\n            <filter class=\"solr.SynonymFilterFactory\" synonyms=\"index_synonyms.txt\" ignoreCase=\"true\" expand=\"false\"/>\n            -->\n            <!-- Case insensitive stop word removal.\n            -->\n            <filter class=\"solr.StopFilterFactory\"\n                    ignoreCase=\"true\"\n                    words=\"lang/stopwords_en.txt\"\n            />\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.EnglishPossessiveFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:\n            <filter class=\"solr.EnglishMinimalStemFilterFactory\"/>\n              -->\n            <filter class=\"solr.PorterStemFilterFactory\"/>\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"true\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\"\n                    ignoreCase=\"true\"\n                    words=\"lang/stopwords_en.txt\"\n            />\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.EnglishPossessiveFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <!-- Optionally you may 
want to use this less aggressive stemmer instead of PorterStemFilterFactory:\n            <filter class=\"solr.EnglishMinimalStemFilterFactory\"/>\n              -->\n            <filter class=\"solr.PorterStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- A text field with defaults appropriate for English, plus\n         aggressive word-splitting and autophrase features enabled.\n         This field is just like text_en, except it adds\n         WordDelimiterFilter to enable splitting and matching of\n         words on case-change, alpha numeric boundaries, and\n         non-alphanumeric chars.  This means certain compound word\n         cases will work, for example query \"wi fi\" will match\n         document \"WiFi\" or \"wi-fi\".\n    -->\n    <dynamicField name=\"*_txt_en_split\" type=\"text_en_splitting\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_en_splitting\" class=\"solr.TextField\" positionIncrementGap=\"100\" autoGeneratePhraseQueries=\"true\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\"/>\n            <!-- in this example, we will only use synonyms at query time\n            <filter class=\"solr.SynonymFilterFactory\" synonyms=\"index_synonyms.txt\" ignoreCase=\"true\" expand=\"false\"/>\n            -->\n            <!-- Case insensitive stop word removal.\n            -->\n            <filter class=\"solr.StopFilterFactory\"\n                    ignoreCase=\"true\"\n                    words=\"lang/stopwords_en.txt\"\n            />\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"1\" generateNumberParts=\"1\" catenateWords=\"1\" catenateNumbers=\"1\" catenateAll=\"0\" splitOnCaseChange=\"1\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <filter class=\"solr.PorterStemFilterFactory\"/>\n     
   </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"true\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\"\n                    ignoreCase=\"true\"\n                    words=\"lang/stopwords_en.txt\"\n            />\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"1\" generateNumberParts=\"1\" catenateWords=\"0\" catenateNumbers=\"0\" catenateAll=\"0\" splitOnCaseChange=\"1\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <filter class=\"solr.PorterStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,\n         but may be good for SKUs.  Can insert dashes in the wrong place and still match. 
-->\n    <dynamicField name=\"*_txt_en_split_tight\" type=\"text_en_splitting_tight\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_en_splitting_tight\" class=\"solr.TextField\" positionIncrementGap=\"100\" autoGeneratePhraseQueries=\"true\">\n        <analyzer>\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"false\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_en.txt\"/>\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"0\" generateNumberParts=\"0\" catenateWords=\"1\" catenateNumbers=\"1\" catenateAll=\"0\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <filter class=\"solr.EnglishMinimalStemFilterFactory\"/>\n            <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes\n                 possible with WordDelimiterFilter in conjuncton with stemming. 
-->\n            <filter class=\"solr.RemoveDuplicatesTokenFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Just like text_general except it reverses the characters of\n\t       each token, to enable more efficient leading wildcard queries.\n    -->\n    <dynamicField name=\"*_txt_rev\" type=\"text_general_rev\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_general_rev\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"stopwords.txt\" />\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.ReversedWildcardFilterFactory\" withOriginal=\"true\"\n                    maxPosAsterisk=\"3\" maxPosQuestion=\"2\" maxFractionAsterisk=\"0.33\"/>\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"true\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"stopwords.txt\" />\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <dynamicField name=\"*_phon_en\" type=\"phonetic_en\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"phonetic_en\" stored=\"false\" indexed=\"true\" class=\"solr.TextField\" >\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.DoubleMetaphoneFilterFactory\" inject=\"false\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- lowercases the entire field value, keeping it as a single token.  
-->\n    <dynamicField name=\"*_s_lower\" type=\"lowercase\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"lowercase\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.KeywordTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\" />\n        </analyzer>\n    </fieldType>\n\n    <!--\n      Example of using PathHierarchyTokenizerFactory at index time, so\n      queries for paths match documents at that path, or in descendent paths\n    -->\n    <dynamicField name=\"*_descendent_path\" type=\"descendent_path\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"descendent_path\" class=\"solr.TextField\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.PathHierarchyTokenizerFactory\" delimiter=\"/\" />\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.KeywordTokenizerFactory\" />\n        </analyzer>\n    </fieldType>\n\n    <!--\n      Example of using PathHierarchyTokenizerFactory at query time, so\n      queries for paths match documents at that path, or in ancestor paths\n    -->\n    <dynamicField name=\"*_ancestor_path\" type=\"ancestor_path\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"ancestor_path\" class=\"solr.TextField\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.KeywordTokenizerFactory\" />\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.PathHierarchyTokenizerFactory\" delimiter=\"/\" />\n        </analyzer>\n    </fieldType>\n\n    <!-- since fields of this type are by default not stored or indexed,\n         any data added to them will be ignored outright.  
-->\n    <fieldType name=\"ignored\" stored=\"false\" indexed=\"false\" docValues=\"false\" multiValued=\"true\" class=\"solr.StrField\" />\n\n    <!-- This point type indexes the coordinates as separate fields (subFields)\n      If subFieldType is defined, it references a type, and a dynamic field\n      definition is created matching *___<typename>.  Alternately, if\n      subFieldSuffix is defined, that is used to create the subFields.\n      Example: if subFieldType=\"double\", then the coordinates would be\n        indexed in fields myloc_0___double,myloc_1___double.\n      Example: if subFieldSuffix=\"_d\" then the coordinates would be indexed\n        in fields myloc_0_d,myloc_1_d\n      The subFields are an implementation detail of the fieldType, and end\n      users normally should not need to know about them.\n     -->\n    <dynamicField name=\"*_point\" type=\"point\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"point\" class=\"solr.PointType\" dimension=\"2\" subFieldSuffix=\"_d\"/>\n\n    <!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->\n    <fieldType name=\"location\" class=\"solr.LatLonType\" subFieldSuffix=\"_coordinate\"/>\n\n    <!-- An alternative geospatial field type new to Solr 4.  It supports multiValued and polygon shapes.\n      For more information about this and other Spatial fields new to Solr 4, see:\n      http://wiki.apache.org/solr/SolrAdaptersForLuceneSpatial4\n    -->\n    <fieldType name=\"location_rpt\" class=\"solr.SpatialRecursivePrefixTreeFieldType\"\n               geo=\"true\" distErrPct=\"0.025\" maxDistErr=\"0.001\" distanceUnits=\"kilometers\" />\n\n    <!-- Money/currency field type. See http://wiki.apache.org/solr/MoneyFieldType\n        Parameters:\n          defaultCurrency: Specifies the default currency if none specified. 
Defaults to \"USD\"\n          precisionStep:   Specifies the precisionStep for the TrieLong field used for the amount\n          providerClass:   Lets you plug in other exchange provider backend:\n                           solr.FileExchangeRateProvider is the default and takes one parameter:\n                             currencyConfig: name of an xml file holding exchange rates\n                           solr.OpenExchangeRatesOrgProvider uses rates from openexchangerates.org:\n                             ratesFileLocation: URL or path to rates JSON file (default latest.json on the web)\n                             refreshInterval: Number of minutes between each rates fetch (default: 1440, min: 60)\n    -->\n    <fieldType name=\"currency\" class=\"solr.CurrencyField\" precisionStep=\"8\" defaultCurrency=\"USD\" currencyConfig=\"currency.xml\" />\n\n\n\n    <!-- some examples for different languages (generally ordered by ISO code) -->\n\n    <!-- Arabic -->\n    <dynamicField name=\"*_txt_ar\" type=\"text_ar\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ar\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- for any non-arabic -->\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ar.txt\" />\n\n            <filter class=\"solr.ArabicNormalizationFilterFactory\"/>\n            <filter class=\"solr.ArabicStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Bulgarian -->\n    <dynamicField name=\"*_txt_bg\" type=\"text_bg\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_bg\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter 
class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_bg.txt\" />\n            <filter class=\"solr.BulgarianStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Catalan -->\n    <dynamicField name=\"*_txt_ca\" type=\"text_ca\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ca\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- removes l', etc -->\n            <filter class=\"solr.ElisionFilterFactory\" ignoreCase=\"true\" articles=\"lang/contractions_ca.txt\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ca.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Catalan\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- CJK bigram (see text_ja for a Japanese configuration using morphological analysis) -->\n    <dynamicField name=\"*_txt_cjk\" type=\"text_cjk\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_cjk\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- normalize width before bigram, as e.g. 
half-width dakuten combine  -->\n            <filter class=\"solr.CJKWidthFilterFactory\"/>\n            <!-- for any non-CJK -->\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.CJKBigramFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Czech -->\n    <dynamicField name=\"*_txt_cz\" type=\"text_cz\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_cz\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_cz.txt\" />\n            <filter class=\"solr.CzechStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Danish -->\n    <dynamicField name=\"*_txt_da\" type=\"text_da\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_da\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_da.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Danish\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- German -->\n    <dynamicField name=\"*_txt_de\" type=\"text_de\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_de\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_de.txt\" format=\"snowball\" />\n            <filter 
class=\"solr.GermanNormalizationFilterFactory\"/>\n            <filter class=\"solr.GermanLightStemFilterFactory\"/>\n            <!-- less aggressive: <filter class=\"solr.GermanMinimalStemFilterFactory\"/> -->\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"German2\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Greek -->\n    <dynamicField name=\"*_txt_el\" type=\"text_el\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_el\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- greek specific lowercase for sigma -->\n            <filter class=\"solr.GreekLowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"false\" words=\"lang/stopwords_el.txt\" />\n            <filter class=\"solr.GreekStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Spanish -->\n    <dynamicField name=\"*_txt_es\" type=\"text_es\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_es\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_es.txt\" format=\"snowball\" />\n            <filter class=\"solr.SpanishLightStemFilterFactory\"/>\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Spanish\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Basque -->\n    <dynamicField name=\"*_txt_eu\" type=\"text_eu\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_eu\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter 
class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_eu.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Basque\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Persian -->\n    <dynamicField name=\"*_txt_fa\" type=\"text_fa\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_fa\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <!-- for ZWNJ -->\n            <charFilter class=\"solr.PersianCharFilterFactory\"/>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.ArabicNormalizationFilterFactory\"/>\n            <filter class=\"solr.PersianNormalizationFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_fa.txt\" />\n        </analyzer>\n    </fieldType>\n\n    <!-- Finnish -->\n    <dynamicField name=\"*_txt_fi\" type=\"text_fi\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_fi\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_fi.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Finnish\"/>\n            <!-- less aggressive: <filter class=\"solr.FinnishLightStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- French -->\n    <dynamicField name=\"*_txt_fr\" type=\"text_fr\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_fr\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer 
class=\"solr.StandardTokenizerFactory\"/>\n            <!-- removes l', etc -->\n            <filter class=\"solr.ElisionFilterFactory\" ignoreCase=\"true\" articles=\"lang/contractions_fr.txt\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_fr.txt\" format=\"snowball\" />\n            <filter class=\"solr.FrenchLightStemFilterFactory\"/>\n            <!-- less aggressive: <filter class=\"solr.FrenchMinimalStemFilterFactory\"/> -->\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"French\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Irish -->\n    <dynamicField name=\"*_txt_ga\" type=\"text_ga\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ga\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- removes d', etc -->\n            <filter class=\"solr.ElisionFilterFactory\" ignoreCase=\"true\" articles=\"lang/contractions_ga.txt\"/>\n            <!-- removes n-, etc. position increments is intentionally false! 
-->\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/hyphenations_ga.txt\"/>\n            <filter class=\"solr.IrishLowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ga.txt\"/>\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Irish\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Galician -->\n    <dynamicField name=\"*_txt_gl\" type=\"text_gl\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_gl\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_gl.txt\" />\n            <filter class=\"solr.GalicianStemFilterFactory\"/>\n            <!-- less aggressive: <filter class=\"solr.GalicianMinimalStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Hindi -->\n    <dynamicField name=\"*_txt_hi\" type=\"text_hi\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_hi\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <!-- normalizes unicode representation -->\n            <filter class=\"solr.IndicNormalizationFilterFactory\"/>\n            <!-- normalizes variation in spelling -->\n            <filter class=\"solr.HindiNormalizationFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_hi.txt\" />\n            <filter class=\"solr.HindiStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Hungarian -->\n    <dynamicField name=\"*_txt_hu\" type=\"text_hu\"  indexed=\"true\"  stored=\"true\"/>\n 
   <fieldType name=\"text_hu\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_hu.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Hungarian\"/>\n            <!-- less aggressive: <filter class=\"solr.HungarianLightStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Armenian -->\n    <dynamicField name=\"*_txt_hy\" type=\"text_hy\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_hy\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_hy.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Armenian\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Indonesian -->\n    <dynamicField name=\"*_txt_id\" type=\"text_id\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_id\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_id.txt\" />\n            <!-- for a less aggressive approach (only inflectional suffixes), set stemDerivational to false -->\n            <filter class=\"solr.IndonesianStemFilterFactory\" stemDerivational=\"true\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Italian -->\n    <dynamicField name=\"*_txt_it\" type=\"text_it\"  indexed=\"true\"  stored=\"true\"/>\n    
<fieldType name=\"text_it\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- removes l', etc -->\n            <filter class=\"solr.ElisionFilterFactory\" ignoreCase=\"true\" articles=\"lang/contractions_it.txt\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_it.txt\" format=\"snowball\" />\n            <filter class=\"solr.ItalianLightStemFilterFactory\"/>\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Italian\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Japanese using morphological analysis (see text_cjk for a configuration using bigramming)\n\n         NOTE: If you want to optimize search for precision, use default operator AND in your query\n         parser config with <solrQueryParser defaultOperator=\"AND\"/> further down in this file.  Use\n         OR if you would like to optimize for recall (default).\n    -->\n    <dynamicField name=\"*_txt_ja\" type=\"text_ja\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ja\" class=\"solr.TextField\" positionIncrementGap=\"100\" autoGeneratePhraseQueries=\"false\">\n        <analyzer>\n            <!-- Kuromoji Japanese morphological analyzer/tokenizer (JapaneseTokenizer)\n\n               Kuromoji has a search mode (default) that does segmentation useful for search.  
A heuristic\n               is used to segment compounds into its parts and the compound itself is kept as synonym.\n\n               Valid values for attribute mode are:\n                  normal: regular segmentation\n                  search: segmentation useful for search with synonyms compounds (default)\n                extended: same as search mode, but unigrams unknown words (experimental)\n\n               For some applications it might be good to use search mode for indexing and normal mode for\n               queries to reduce recall and prevent parts of compounds from being matched and highlighted.\n               Use <analyzer type=\"index\"> and <analyzer type=\"query\"> for this and mode normal in query.\n\n               Kuromoji also has a convenient user dictionary feature that allows overriding the statistical\n               model with your own entries for segmentation, part-of-speech tags and readings without a need\n               to specify weights.  Notice that user dictionaries have not been subject to extensive testing.\n\n               User dictionary attributes are:\n                         userDictionary: user dictionary filename\n                 userDictionaryEncoding: user dictionary encoding (default is UTF-8)\n\n               See lang/userdict_ja.txt for a sample user dictionary file.\n\n               Punctuation characters are discarded by default.  
Use discardPunctuation=\"false\" to keep them.\n\n               See http://wiki.apache.org/solr/JapaneseLanguageSupport for more on Japanese language support.\n            -->\n            <tokenizer class=\"solr.JapaneseTokenizerFactory\" mode=\"search\"/>\n            <!--<tokenizer class=\"solr.JapaneseTokenizerFactory\" mode=\"search\" userDictionary=\"lang/userdict_ja.txt\"/>-->\n            <!-- Reduces inflected verbs and adjectives to their base/dictionary forms  -->\n            <filter class=\"solr.JapaneseBaseFormFilterFactory\"/>\n            <!-- Removes tokens with certain part-of-speech tags -->\n            <filter class=\"solr.JapanesePartOfSpeechStopFilterFactory\" tags=\"lang/stoptags_ja.txt\" />\n            <!-- Normalizes full-width romaji to half-width and half-width kana to full-width (Unicode NFKC subset) -->\n            <filter class=\"solr.CJKWidthFilterFactory\"/>\n            <!-- Removes common tokens typically not useful for search, but have a negative effect on ranking -->\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ja.txt\" />\n            <!-- Normalizes common katakana spelling variations by removing any last long sound character (U+30FC) -->\n            <filter class=\"solr.JapaneseKatakanaStemFilterFactory\" minimumLength=\"4\"/>\n            <!-- Lower-cases romaji characters -->\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Latvian -->\n    <dynamicField name=\"*_txt_lv\" type=\"text_lv\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_lv\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_lv.txt\" />\n            <filter 
class=\"solr.LatvianStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Dutch -->\n    <dynamicField name=\"*_txt_nl\" type=\"text_nl\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_nl\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_nl.txt\" format=\"snowball\" />\n            <filter class=\"solr.StemmerOverrideFilterFactory\" dictionary=\"lang/stemdict_nl.txt\" ignoreCase=\"false\"/>\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Dutch\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Norwegian -->\n    <dynamicField name=\"*_txt_no\" type=\"text_no\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_no\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_no.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Norwegian\"/>\n            <!-- less aggressive: <filter class=\"solr.NorwegianLightStemFilterFactory\"/> -->\n            <!-- singular/plural: <filter class=\"solr.NorwegianMinimalStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Portuguese -->\n    <dynamicField name=\"*_txt_pt\" type=\"text_pt\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_pt\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter 
class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_pt.txt\" format=\"snowball\" />\n            <filter class=\"solr.PortugueseLightStemFilterFactory\"/>\n            <!-- less aggressive: <filter class=\"solr.PortugueseMinimalStemFilterFactory\"/> -->\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Portuguese\"/> -->\n            <!-- most aggressive: <filter class=\"solr.PortugueseStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Romanian -->\n    <dynamicField name=\"*_txt_ro\" type=\"text_ro\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ro\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ro.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Romanian\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Russian -->\n    <dynamicField name=\"*_txt_ru\" type=\"text_ru\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ru\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ru.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Russian\"/>\n            <!-- less aggressive: <filter class=\"solr.RussianLightStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Swedish -->\n    <dynamicField name=\"*_txt_sv\" type=\"text_sv\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_sv\" class=\"solr.TextField\" 
positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_sv.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Swedish\"/>\n            <!-- less aggressive: <filter class=\"solr.SwedishLightStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Thai -->\n    <dynamicField name=\"*_txt_th\" type=\"text_th\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_th\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.ThaiTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_th.txt\" />\n        </analyzer>\n    </fieldType>\n\n    <!-- Turkish -->\n    <dynamicField name=\"*_txt_tr\" type=\"text_tr\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_tr\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.TurkishLowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"false\" words=\"lang/stopwords_tr.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Turkish\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Similarity is the scoring routine for each document vs. 
a query.\n       A custom Similarity or SimilarityFactory may be specified here, but\n       the default is fine for most applications.\n       For more info: http://wiki.apache.org/solr/SchemaXml#Similarity\n    -->\n    <!--\n     <similarity class=\"com.example.solr.CustomSimilarityFactory\">\n       <str name=\"paramkey\">param value</str>\n     </similarity>\n    -->\n\n</schema>\n"
  },
  {
    "path": "haystack/templates/search_configuration/solrconfig.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<!--\n     For more details about configurations options that may appear in\n     this file, see http://wiki.apache.org/solr/SolrConfigXml.\n-->\n<config>\n  <!-- In all configuration below, a prefix of \"solr.\" for class names\n       is an alias that causes solr to search appropriate packages,\n       including org.apache.solr.(search|update|request|core|analysis)\n\n       You may also specify a fully qualified Java classname if you\n       have your own custom plugins.\n    -->\n\n  <!-- Controls what version of Lucene various components of Solr\n       adhere to.  Generally, you want to use the latest version to\n       get all bug fixes and improvements. 
It is highly recommended\n       that you fully re-index after changing this setting as it can\n       affect both how text is indexed and queried.\n  -->\n  <luceneMatchVersion>6.5.0</luceneMatchVersion>\n  <schemaFactory class=\"ClassicIndexSchemaFactory\"/>\n\n  <!-- <lib/> directives can be used to instruct Solr to load any Jars\n       identified and use them to resolve any \"plugins\" specified in\n       your solrconfig.xml or schema.xml (ie: Analyzers, Request\n       Handlers, etc...).\n\n       All directories and paths are resolved relative to the\n       instanceDir.\n\n       Please note that <lib/> directives are processed in the order\n       that they appear in your solrconfig.xml file, and are \"stacked\"\n       on top of each other when building a ClassLoader - so if you have\n       plugin jars with dependencies on other jars, the \"lower level\"\n       dependency jars should be loaded first.\n\n       If a \"./lib\" directory exists in your instanceDir, all files\n       found in it are included as if you had used the following\n       syntax...\n\n              <lib dir=\"./lib\" />\n    -->\n\n  <!-- A 'dir' option by itself adds any files found in the directory\n       to the classpath, this is useful for including all jars in a\n       directory.\n\n       When a 'regex' is specified in addition to a 'dir', only the\n       files in that directory which completely match the regex\n       (anchored on both ends) will be included.\n\n       If a 'dir' option (with or without a regex) is used and nothing\n       is found that matches, a warning will be logged.\n\n       The examples below can be used to load some solr-contribs along\n       with their external dependencies.\n    -->\n  <lib dir=\"${solr.install.dir:../../../..}/contrib/extraction/lib\" regex=\".*\\.jar\" />\n  <lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-cell-\\d.*\\.jar\" />\n\n  <lib dir=\"${solr.install.dir:../../../..}/contrib/clustering/lib/\" 
regex=\".*\\.jar\" />\n  <lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-clustering-\\d.*\\.jar\" />\n\n  <lib dir=\"${solr.install.dir:../../../..}/contrib/langid/lib/\" regex=\".*\\.jar\" />\n  <lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-langid-\\d.*\\.jar\" />\n\n  <lib dir=\"${solr.install.dir:../../../..}/contrib/velocity/lib\" regex=\".*\\.jar\" />\n  <lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-velocity-\\d.*\\.jar\" />\n  <!-- an exact 'path' can be used instead of a 'dir' to specify a\n       specific jar file.  This will cause a serious error to be logged\n       if it can't be loaded.\n    -->\n  <!--\n     <lib path=\"../a-jar-that-does-not-exist.jar\" />\n  -->\n\n  <!-- Data Directory\n\n       Used to specify an alternate directory to hold all index data\n       other than the default ./data under the Solr home.  If\n       replication is in use, this should match the replication\n       configuration.\n    -->\n  <dataDir>${solr.data.dir:}</dataDir>\n\n\n  <!-- The DirectoryFactory to use for indexes.\n\n       solr.StandardDirectoryFactory is filesystem\n       based and tries to pick the best implementation for the current\n       JVM and platform.  
solr.NRTCachingDirectoryFactory, the default,\n       wraps solr.StandardDirectoryFactory and caches small files in memory\n       for better NRT performance.\n\n       One can force a particular implementation via solr.MMapDirectoryFactory,\n       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.\n\n       solr.RAMDirectoryFactory is memory based, not\n       persistent, and doesn't work with replication.\n    -->\n  <directoryFactory name=\"DirectoryFactory\"\n                    class=\"${solr.directoryFactory:solr.NRTCachingDirectoryFactory}\"/>\n\n  <!-- The CodecFactory for defining the format of the inverted index.\n       The default implementation is SchemaCodecFactory, which is the official Lucene\n       index format, but hooks into the schema to provide per-field customization of\n       the postings lists and per-document values in the fieldType element\n       (postingsFormat/docValuesFormat). Note that most of the alternative implementations\n       are experimental, so if you choose to customize the index format, it's a good\n       idea to convert back to the official format e.g. 
via IndexWriter.addIndexes(IndexReader)\n       before upgrading to a newer version to avoid unnecessary reindexing.\n       A \"compressionMode\" string element can be added to <codecFactory> to choose\n       between the existing compression modes in the default codec: \"BEST_SPEED\" (default)\n       or \"BEST_COMPRESSION\".\n  -->\n  <codecFactory class=\"solr.SchemaCodecFactory\"/>\n\n  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n       Index Config - These settings control low-level behavior of indexing\n       Most example settings here show the default value, but are commented\n       out, to more easily see where customizations have been made.\n\n       Note: This replaces <indexDefaults> and <mainIndex> from older versions\n       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->\n  <indexConfig>\n    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a\n         LimitTokenCountFilterFactory in your fieldType definition. E.g.\n     <filter class=\"solr.LimitTokenCountFilterFactory\" maxTokenCount=\"10000\"/>\n    -->\n    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->\n    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->\n\n    <!-- Expert: Enabling compound file will use less files for the index,\n         using fewer file descriptors on the expense of performance decrease.\n         Default in Lucene is \"true\". Default in Solr is \"false\" (since 3.6) -->\n    <!-- <useCompoundFile>false</useCompoundFile> -->\n\n    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene\n         indexing for buffering added documents and deletions before they are\n         flushed to the Directory.\n         maxBufferedDocs sets a limit on the number of documents buffered\n         before flushing.\n         If both ramBufferSizeMB and maxBufferedDocs is set, then\n         Lucene will flush based on whichever limit is hit first.  
-->\n    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->\n    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->\n\n    <!-- Expert: Merge Policy\n         The Merge Policy in Lucene controls how merging of segments is done.\n         The default since Solr/Lucene 3.3 is TieredMergePolicy.\n         The default since Lucene 2.3 was the LogByteSizeMergePolicy,\n         Even older versions of Lucene used LogDocMergePolicy.\n      -->\n    <!--\n        <mergePolicyFactory class=\"org.apache.solr.index.TieredMergePolicyFactory\">\n          <int name=\"maxMergeAtOnce\">10</int>\n          <int name=\"segmentsPerTier\">10</int>\n          <double name=\"noCFSRatio\">0.1</double>\n        </mergePolicyFactory>\n      -->\n\n    <!-- Expert: Merge Scheduler\n         The Merge Scheduler in Lucene controls how merges are\n         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)\n         can perform merges in the background using separate threads.\n         The SerialMergeScheduler (Lucene 2.2 default) does not.\n     -->\n    <!--\n       <mergeScheduler class=\"org.apache.lucene.index.ConcurrentMergeScheduler\"/>\n       -->\n\n    <!-- LockFactory\n\n         This option specifies which Lucene LockFactory implementation\n         to use.\n\n         single = SingleInstanceLockFactory - suggested for a\n                  read-only index or when there is no possibility of\n                  another process trying to modify the index.\n         native = NativeFSLockFactory - uses OS native file locking.\n                  Do not use when multiple solr webapps in the same\n                  JVM are attempting to share a single index.\n         simple = SimpleFSLockFactory  - uses a plain file for locking\n\n         Defaults: 'native' is default for Solr3.6 and later, otherwise\n                   'simple' is the default\n\n         More details on the nuances of each LockFactory...\n         http://wiki.apache.org/lucene-java/AvailableLockFactories\n    -->\n 
   <lockType>${solr.lock.type:native}</lockType>\n\n    <!-- Commit Deletion Policy\n         Custom deletion policies can be specified here. The class must\n         implement org.apache.lucene.index.IndexDeletionPolicy.\n\n         The default Solr IndexDeletionPolicy implementation supports\n         deleting index commit points on number of commits, age of\n         commit point and optimized status.\n\n         The latest commit point should always be preserved regardless\n         of the criteria.\n    -->\n    <!--\n    <deletionPolicy class=\"solr.SolrDeletionPolicy\">\n    -->\n    <!-- The number of commit points to be kept -->\n    <!-- <str name=\"maxCommitsToKeep\">1</str> -->\n    <!-- The number of optimized commit points to be kept -->\n    <!-- <str name=\"maxOptimizedCommitsToKeep\">0</str> -->\n    <!--\n        Delete all commit points once they have reached the given age.\n        Supports DateMathParser syntax e.g.\n      -->\n    <!--\n       <str name=\"maxCommitAge\">30MINUTES</str>\n       <str name=\"maxCommitAge\">1DAY</str>\n    -->\n    <!--\n    </deletionPolicy>\n    -->\n\n    <!-- Lucene Infostream\n\n         To aid in advanced debugging, Lucene provides an \"InfoStream\"\n         of detailed information when indexing.\n\n         Setting the value to true will instruct the underlying Lucene\n         IndexWriter to write its debugging info to the specified file\n      -->\n    <!-- <infoStream file=\"INFOSTREAM.txt\">false</infoStream> -->\n  </indexConfig>\n\n\n  <!-- JMX\n\n       This example enables JMX if and only if an existing MBeanServer\n       is found, use this if you want to configure JMX through JVM\n       parameters. 
Remove this to disable exposing Solr configuration\n       and statistics to JMX.\n\n       For more details see http://wiki.apache.org/solr/SolrJmx\n    -->\n  <jmx />\n  <!-- If you want to connect to a particular server, specify the\n       agentId\n    -->\n  <!-- <jmx agentId=\"myAgent\" /> -->\n  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->\n  <!-- <jmx serviceUrl=\"service:jmx:rmi:///jndi/rmi://localhost:9999/solr\"/>\n    -->\n\n  <!-- The default high-performance update handler -->\n  <updateHandler class=\"solr.DirectUpdateHandler2\">\n\n    <!-- Enables a transaction log, used for real-time get, durability,\n         and solr cloud replica recovery.  The log can grow as big as\n         uncommitted changes to the index, so use of a hard autoCommit\n         is recommended (see below).\n         \"dir\" - the target directory for transaction logs, defaults to the\n                solr data directory.\n         \"numVersionBuckets\" - sets the number of buckets used to keep\n                track of max version values when checking for re-ordered\n                updates; increase this value to reduce the cost of\n                synchronizing access to version buckets during high-volume\n                indexing, this requires 8 bytes (long) * numVersionBuckets\n                of heap space per Solr core.\n    -->\n    <updateLog>\n      <str name=\"dir\">${solr.ulog.dir:}</str>\n      <int name=\"numVersionBuckets\">${solr.ulog.numVersionBuckets:65536}</int>\n    </updateLog>\n\n    <!-- AutoCommit\n\n         Perform a hard commit automatically under certain conditions.\n         Instead of enabling autoCommit, consider using \"commitWithin\"\n         when adding documents.\n\n         http://wiki.apache.org/solr/UpdateXmlMessages\n\n         maxDocs - Maximum number of documents to add since the last\n                   commit before automatically triggering a new commit.\n\n         maxTime - Maximum amount of time in 
ms that is allowed to pass\n                   since a document was added before automatically\n                   triggering a new commit.\n         openSearcher - if false, the commit causes recent index changes\n           to be flushed to stable storage, but does not cause a new\n           searcher to be opened to make those changes visible.\n\n         If the updateLog is enabled, then it's highly recommended to\n         have some sort of hard autoCommit to limit the log size.\n      -->\n    <autoCommit>\n      <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>\n      <openSearcher>false</openSearcher>\n    </autoCommit>\n\n    <!-- softAutoCommit is like autoCommit except it causes a\n         'soft' commit which only ensures that changes are visible\n         but does not ensure that data is synced to disk.  This is\n         faster and more near-realtime friendly than a hard commit.\n      -->\n\n    <autoSoftCommit>\n      <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>\n    </autoSoftCommit>\n\n    <!-- Update Related Event Listeners\n\n         Various IndexWriter related events can trigger Listeners to\n         take actions.\n\n         postCommit - fired after every commit or optimize command\n         postOptimize - fired after every optimize command\n      -->\n    <!-- The RunExecutableListener executes an external command from a\n         hook such as postCommit or postOptimize.\n\n         exe - the name of the executable to run\n         dir - dir to use as the current working directory. (default=\".\")\n         wait - the calling thread waits until the executable returns.\n                (default=\"true\")\n         args - the arguments to pass to the program.  (default is none)\n         env - environment variables to set.  
(default is none)\n      -->\n    <!-- This example shows how RunExecutableListener could be used\n         with the script based replication...\n         http://wiki.apache.org/solr/CollectionDistribution\n      -->\n    <!--\n       <listener event=\"postCommit\" class=\"solr.RunExecutableListener\">\n         <str name=\"exe\">solr/bin/snapshooter</str>\n         <str name=\"dir\">.</str>\n         <bool name=\"wait\">true</bool>\n         <arr name=\"args\"> <str>arg1</str> <str>arg2</str> </arr>\n         <arr name=\"env\"> <str>MYVAR=val1</str> </arr>\n       </listener>\n      -->\n\n  </updateHandler>\n\n  <!-- IndexReaderFactory\n\n       Use the following format to specify a custom IndexReaderFactory,\n       which allows for alternate IndexReader implementations.\n\n       ** Experimental Feature **\n\n       Please note - Using a custom IndexReaderFactory may prevent\n       certain other features from working. The API to\n       IndexReaderFactory may change without warning or may even be\n       removed from future releases if the problems cannot be\n       resolved.\n\n\n       ** Features that may not work with custom IndexReaderFactory **\n\n       The ReplicationHandler assumes a disk-resident index. Using a\n       custom IndexReader implementation may cause incompatibility\n       with ReplicationHandler and may cause replication to not work\n       correctly. 
See SOLR-1366 for details.\n\n    -->\n  <!--\n  <indexReaderFactory name=\"IndexReaderFactory\" class=\"package.class\">\n    <str name=\"someArg\">Some Value</str>\n  </indexReaderFactory >\n  -->\n\n  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n       Query section - these settings control query time things like caches\n       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->\n  <query>\n    <!-- Max Boolean Clauses\n\n         Maximum number of clauses in each BooleanQuery,  an exception\n         is thrown if exceeded.\n\n         ** WARNING **\n\n         This option actually modifies a global Lucene property that\n         will affect all SolrCores.  If multiple solrconfig.xml files\n         disagree on this property, the value at any given moment will\n         be based on the last SolrCore to be initialized.\n\n      -->\n    <maxBooleanClauses>1024</maxBooleanClauses>\n\n\n    <!-- Solr Internal Query Caches\n\n         There are two implementations of cache available for Solr,\n         LRUCache, based on a synchronized LinkedHashMap, and\n         FastLRUCache, based on a ConcurrentHashMap.\n\n         FastLRUCache has faster gets and slower puts in single\n         threaded operation and thus is generally faster than LRUCache\n         when the hit ratio of the cache is high (> 75%), and may be\n         faster under other scenarios on multi-cpu systems.\n    -->\n\n    <!-- Filter Cache\n\n         Cache used by SolrIndexSearcher for filters (DocSets),\n         unordered sets of *all* documents that match a query.  When a\n         new searcher is opened, its caches may be prepopulated or\n         \"autowarmed\" using data from caches in the old searcher.\n         autowarmCount is the number of items to prepopulate.  
For\n         LRUCache, the autowarmed items will be the most recently\n         accessed items.\n\n         Parameters:\n           class - the SolrCache implementation to use\n               (LRUCache or FastLRUCache)\n           size - the maximum number of entries in the cache\n           initialSize - the initial capacity (number of entries) of\n               the cache.  (see java.util.HashMap)\n           autowarmCount - the number of entries to prepopulate from\n               an old cache.\n           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed\n                      to occupy. Note that when this option is specified, the size\n                      and initialSize parameters are ignored.\n      -->\n    <filterCache class=\"solr.FastLRUCache\"\n                 size=\"512\"\n                 initialSize=\"512\"\n                 autowarmCount=\"0\"/>\n\n    <!-- Query Result Cache\n\n         Caches results of searches - ordered lists of document ids\n         (DocList) based on a query, a sort, and the range of documents requested.\n         Additional supported parameter by LRUCache:\n            maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed\n                       to occupy\n      -->\n    <queryResultCache class=\"solr.LRUCache\"\n                      size=\"512\"\n                      initialSize=\"512\"\n                      autowarmCount=\"0\"/>\n\n    <!-- Document Cache\n\n         Caches Lucene Document objects (the stored fields for each\n         document).  
Since Lucene internal document ids are transient,\n         this cache will not be autowarmed.\n      -->\n    <documentCache class=\"solr.LRUCache\"\n                   size=\"512\"\n                   initialSize=\"512\"\n                   autowarmCount=\"0\"/>\n\n    <!-- custom cache currently used by block join -->\n    <cache name=\"perSegFilter\"\n           class=\"solr.search.LRUCache\"\n           size=\"10\"\n           initialSize=\"0\"\n           autowarmCount=\"10\"\n           regenerator=\"solr.NoOpRegenerator\" />\n\n    <!-- Field Value Cache\n\n         Cache used to hold field values that are quickly accessible\n         by document id.  The fieldValueCache is created by default\n         even if not configured here.\n      -->\n    <!--\n       <fieldValueCache class=\"solr.FastLRUCache\"\n                        size=\"512\"\n                        autowarmCount=\"128\"\n                        showItems=\"32\" />\n      -->\n\n    <!-- Custom Cache\n\n         Example of a generic cache.  These caches may be accessed by\n         name through SolrIndexSearcher.getCache(),cacheLookup(), and\n         cacheInsert().  The purpose is to enable easy caching of\n         user/application level data.  The regenerator argument should\n         be specified as an implementation of solr.CacheRegenerator\n         if autowarming is desired.\n      -->\n    <!--\n       <cache name=\"myUserCache\"\n              class=\"solr.LRUCache\"\n              size=\"4096\"\n              initialSize=\"1024\"\n              autowarmCount=\"1024\"\n              regenerator=\"com.mycompany.MyRegenerator\"\n              />\n      -->\n\n\n    <!-- Lazy Field Loading\n\n         If true, stored fields that are not requested will be loaded\n         lazily.  
This can result in a significant speed improvement\n         if the usual case is to not load all stored fields,\n         especially if the skipped fields are large compressed text\n         fields.\n    -->\n    <enableLazyFieldLoading>true</enableLazyFieldLoading>\n\n    <!-- Use Filter For Sorted Query\n\n         A possible optimization that attempts to use a filter to\n         satisfy a search.  If the requested sort does not include\n         score, then the filterCache will be checked for a filter\n         matching the query. If found, the filter will be used as the\n         source of document ids, and then the sort will be applied to\n         that.\n\n         For most situations, this will not be useful unless you\n         frequently get the same search repeatedly with different sort\n         options, and none of them ever use \"score\"\n      -->\n    <!--\n       <useFilterForSortedQuery>true</useFilterForSortedQuery>\n      -->\n\n    <!-- Result Window Size\n\n         An optimization for use with the queryResultCache.  When a search\n         is requested, a superset of the requested number of document ids\n         are collected.  For example, if a search for a particular query\n         requests matching documents 10 through 19, and queryResultWindowSize is 50,\n         then documents 0 through 49 will be collected and cached.  Any further\n         requests in that range can be satisfied via the cache.\n      -->\n    <queryResultWindowSize>20</queryResultWindowSize>\n\n    <!-- Maximum number of documents to cache for any entry in the\n         queryResultCache.\n      -->\n    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>\n\n    <!-- Query Related Event Listeners\n\n         Various IndexSearcher related events can trigger Listeners to\n         take actions.\n\n         newSearcher - fired whenever a new searcher is being prepared\n         and there is a current searcher handling requests (aka\n         registered).  
It can be used to prime certain caches to\n         prevent long request times for certain requests.\n\n         firstSearcher - fired whenever a new searcher is being\n         prepared but there is no current registered searcher to handle\n         requests or to gain autowarming data from.\n\n\n      -->\n    <!-- QuerySenderListener takes an array of NamedList and executes a\n         local query request for each NamedList in sequence.\n      -->\n    <listener event=\"newSearcher\" class=\"solr.QuerySenderListener\">\n      <arr name=\"queries\">\n        <!--\n           <lst><str name=\"q\">solr</str><str name=\"sort\">price asc</str></lst>\n           <lst><str name=\"q\">rocks</str><str name=\"sort\">weight asc</str></lst>\n          -->\n      </arr>\n    </listener>\n    <listener event=\"firstSearcher\" class=\"solr.QuerySenderListener\">\n      <arr name=\"queries\">\n        <!--\n        <lst>\n          <str name=\"q\">static firstSearcher warming in solrconfig.xml</str>\n        </lst>\n        -->\n      </arr>\n    </listener>\n\n    <!-- Use Cold Searcher\n\n         If a search request comes in and there is no current\n         registered searcher, then immediately register the still\n         warming searcher and use it.  
If \"false\" then all requests\n         will block until the first searcher is done warming.\n      -->\n    <useColdSearcher>false</useColdSearcher>\n\n  </query>\n\n\n  <!-- Request Dispatcher\n\n       This section contains instructions for how the SolrDispatchFilter\n       should behave when processing requests for this SolrCore.\n\n       handleSelect is a legacy option that affects the behavior of requests\n       such as /select?qt=XXX\n\n       handleSelect=\"true\" will cause the SolrDispatchFilter to process\n       the request and dispatch the query to a handler specified by the\n       \"qt\" param, assuming \"/select\" isn't already registered.\n\n       handleSelect=\"false\" will cause the SolrDispatchFilter to\n       ignore \"/select\" requests, resulting in a 404 unless a handler\n       is explicitly registered with the name \"/select\"\n\n       handleSelect=\"true\" is not recommended for new users, but is the default\n       for backwards compatibility\n    -->\n  <requestDispatcher handleSelect=\"false\" >\n    <!-- Request Parsing\n\n         These settings indicate how Solr Requests may be parsed, and\n         what restrictions may be placed on the ContentStreams from\n         those requests\n\n         enableRemoteStreaming - enables use of the stream.file\n         and stream.url parameters for specifying remote streams.\n\n         multipartUploadLimitInKB - specifies the max size (in KiB) of\n         Multipart File Uploads that Solr will allow in a Request.\n\n         formdataUploadLimitInKB - specifies the max size (in KiB) of\n         form data (application/x-www-form-urlencoded) sent via\n         POST. You can use POST to pass request parameters not\n         fitting into the URL.\n\n         addHttpRequestToContext - if set to true, it will instruct\n         the requestParsers to include the original HttpServletRequest\n         object in the context map of the SolrQueryRequest under the\n         key \"httpRequest\". 
It will not be used by any of the existing\n         Solr components, but may be useful when developing custom\n         plugins.\n\n         *** WARNING ***\n         The settings below authorize Solr to fetch remote files. You\n         should make sure your system has some authentication before\n         using enableRemoteStreaming=\"true\"\n\n      -->\n    <requestParsers enableRemoteStreaming=\"true\"\n                    multipartUploadLimitInKB=\"2048000\"\n                    formdataUploadLimitInKB=\"2048\"\n                    addHttpRequestToContext=\"false\"/>\n\n    <!-- HTTP Caching\n\n         Set HTTP caching related parameters (for proxy caches and clients).\n\n         The options below instruct Solr not to output any HTTP Caching\n         related headers\n      -->\n    <httpCaching never304=\"true\" />\n    <!-- If you include a <cacheControl> directive, it will be used to\n         generate a Cache-Control header (as well as an Expires header\n         if the value contains \"max-age=\")\n\n         By default, no Cache-Control header is generated.\n\n         You can use the <cacheControl> option even if you have set\n         never304=\"true\"\n      -->\n    <!--\n       <httpCaching never304=\"true\" >\n         <cacheControl>max-age=30, public</cacheControl>\n       </httpCaching>\n      -->\n    <!-- To enable Solr to respond with automatically generated HTTP\n         Caching headers, and to respond to Cache Validation requests\n         correctly, set the value of never304=\"false\"\n\n         This will cause Solr to generate Last-Modified and ETag\n         headers based on the properties of the Index.\n\n         The following options can also be specified to affect the\n         values of these headers...\n\n         lastModFrom - the default value is \"openTime\" which means the\n         Last-Modified value (and validation against If-Modified-Since\n         requests) will all be relative to when the current Searcher\n         
was opened.  You can change it to lastModFrom=\"dirLastMod\" if\n         you want the value to exactly correspond to when the physical\n         index was last modified.\n\n         etagSeed=\"...\" is an option you can change to force the ETag\n         header (and validation against If-None-Match requests) to be\n         different even if the index has not changed (ie: when making\n         significant changes to your config file)\n\n         (lastModifiedFrom and etagSeed are both ignored if you use\n         the never304=\"true\" option)\n      -->\n    <!--\n       <httpCaching lastModifiedFrom=\"openTime\"\n                    etagSeed=\"Solr\">\n         <cacheControl>max-age=30, public</cacheControl>\n       </httpCaching>\n      -->\n  </requestDispatcher>\n\n  <!-- Request Handlers\n\n       http://wiki.apache.org/solr/SolrRequestHandler\n\n       Incoming queries will be dispatched to a specific handler by name\n       based on the path specified in the request.\n\n       Legacy behavior: If the request path uses \"/select\" but no Request\n       Handler has that name, and if handleSelect=\"true\" has been specified in\n       the requestDispatcher, then the Request Handler is dispatched based on\n       the qt parameter.  
Handlers without a leading '/' are accessed this way\n       like so: http://host/app/[core/]select?qt=name  If no qt is\n       given, then the requestHandler that declares default=\"true\" will be\n       used or the one named \"standard\".\n\n       If a Request Handler is declared with startup=\"lazy\", then it will\n       not be initialized until the first request that uses it.\n\n    -->\n  <!-- SearchHandler\n\n       http://wiki.apache.org/solr/SearchHandler\n\n       For processing Search Queries, the primary Request Handler\n       provided with Solr is \"SearchHandler\". It delegates to a sequence\n       of SearchComponents (see below) and supports distributed\n       queries across multiple shards\n    -->\n  <requestHandler name=\"/select\" class=\"solr.SearchHandler\">\n    <!-- default values for query parameters can be specified, these\n         will be overridden by parameters in the request\n      -->\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">explicit</str>\n      <int name=\"rows\">10</int>\n      <!-- <str name=\"df\">text</str> -->\n      <str name=\"spellcheck.dictionary\">default</str>\n      <str name=\"spellcheck\">on</str>\n      <str name=\"spellcheck.extendedResults\">true</str>\n      <str name=\"spellcheck.count\">10</str>\n      <str name=\"spellcheck.alternativeTermCount\">5</str>\n      <str name=\"spellcheck.maxResultsForSuggest\">5</str>\n      <str name=\"spellcheck.collate\">true</str>\n      <str name=\"spellcheck.collateExtendedResults\">true</str>\n      <str name=\"spellcheck.maxCollationTries\">10</str>\n      <str name=\"spellcheck.maxCollations\">5</str>\n    </lst>\n    <!-- In addition to defaults, \"appends\" params can be specified\n         to identify values which should be appended to the list of\n         multi-val params from the query (or the existing \"defaults\").\n      -->\n    <!-- In this example, the param \"fq=inStock:true\" would be appended to\n         any query time fq params the 
user may specify, as a mechanism for\n         partitioning the index, independent of any user selected filtering\n         that may also be desired (perhaps as a result of faceted searching).\n\n         NOTE: there is *absolutely* nothing a client can do to prevent these\n         \"appends\" values from being used, so don't use this mechanism\n         unless you are sure you always want it.\n      -->\n    <!--\n       <lst name=\"appends\">\n         <str name=\"fq\">inStock:true</str>\n       </lst>\n      -->\n    <!-- \"invariants\" are a way of letting the Solr maintainer lock down\n         the options available to Solr clients.  Any params values\n         specified here are used regardless of what values may be specified\n         in either the query, the \"defaults\", or the \"appends\" params.\n\n         In this example, the facet.field and facet.query params would\n         be fixed, limiting the facets clients can use.  Faceting is\n         not turned on by default - but if the client does specify\n         facet=true in the request, these are the only facets they\n         will be able to see counts for; regardless of what other\n         facet.field or facet.query params they may specify.\n\n         NOTE: there is *absolutely* nothing a client can do to prevent these\n         \"invariants\" values from being used, so don't use this mechanism\n         unless you are sure you always want it.\n      -->\n    <!--\n       <lst name=\"invariants\">\n         <str name=\"facet.field\">cat</str>\n         <str name=\"facet.field\">manu_exact</str>\n         <str name=\"facet.query\">price:[* TO 500]</str>\n         <str name=\"facet.query\">price:[500 TO *]</str>\n       </lst>\n      -->\n    <!-- If the default list of SearchComponents is not desired, that\n         list can either be overridden completely, or components can be\n         prepended or appended to the default list.  
(see below)\n      -->\n    <!--\n       <arr name=\"components\">\n         <str>nameOfCustomComponent1</str>\n         <str>nameOfCustomComponent2</str>\n       </arr>\n      -->\n      <arr name=\"last-components\">\n        <str>spellcheck</str>\n      </arr>\n  </requestHandler>\n\n  <!-- A request handler that returns indented JSON by default -->\n  <requestHandler name=\"/query\" class=\"solr.SearchHandler\">\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">explicit</str>\n      <str name=\"wt\">json</str>\n      <str name=\"indent\">true</str>\n    </lst>\n  </requestHandler>\n\n  <requestHandler name=\"/mlt\" class=\"solr.MoreLikeThisHandler\" />\n  <!-- A Robust Example\n\n       This example SearchHandler declaration shows off usage of the\n       SearchHandler with many defaults declared\n\n       Note that multiple instances of the same Request Handler\n       (SearchHandler) can be registered multiple times with different\n       names (and different init parameters)\n    -->\n  <requestHandler name=\"/browse\" class=\"solr.SearchHandler\" useParams=\"query,facets,velocity,browse\">\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">explicit</str>\n    </lst>\n  </requestHandler>\n\n  <initParams path=\"/update/**,/query,/select,/tvrh,/elevate,/spell,/browse\">\n    <lst name=\"defaults\">\n      <str name=\"df\">{{ content_field_name }}</str>\n    </lst>\n  </initParams>\n\n  <initParams path=\"/update/**\">\n    <lst name=\"defaults\">\n      <str name=\"update.chain\">add-unknown-fields-to-the-schema</str>\n    </lst>\n  </initParams>\n\n  <!-- ping/healthcheck -->\n  <requestHandler name=\"/admin/ping\" class=\"solr.PingRequestHandler\">\n    <lst name=\"invariants\">\n      <str name=\"q\">solrpingquery</str>\n    </lst>\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">all</str>\n    </lst>\n    <!-- An optional feature of the PingRequestHandler is to configure the\n         handler with a \"healthcheckFile\" 
which can be used to enable/disable\n         the PingRequestHandler.\n         relative paths are resolved against the data dir\n      -->\n    <!-- <str name=\"healthcheckFile\">server-enabled.txt</str> -->\n  </requestHandler>\n\n  <!-- Solr Cell Update Request Handler\n\n       http://wiki.apache.org/solr/ExtractingRequestHandler\n\n    -->\n  <requestHandler name=\"/update/extract\"\n                  startup=\"lazy\"\n                  class=\"solr.extraction.ExtractingRequestHandler\" >\n    <lst name=\"defaults\">\n      <str name=\"lowernames\">true</str>\n      <str name=\"fmap.meta\">ignored_</str>\n      <str name=\"fmap.content\">{{ content_field_name }}</str>\n    </lst>\n  </requestHandler>\n\n  <!-- Search Components\n\n       Search components are registered to SolrCore and used by\n       instances of SearchHandler (which can access them by name)\n\n       By default, the following components are available:\n\n       <searchComponent name=\"query\"     class=\"solr.QueryComponent\" />\n       <searchComponent name=\"facet\"     class=\"solr.FacetComponent\" />\n       <searchComponent name=\"mlt\"       class=\"solr.MoreLikeThisComponent\" />\n       <searchComponent name=\"highlight\" class=\"solr.HighlightComponent\" />\n       <searchComponent name=\"stats\"     class=\"solr.StatsComponent\" />\n       <searchComponent name=\"debug\"     class=\"solr.DebugComponent\" />\n\n       Default configuration in a requestHandler would look like:\n\n       <arr name=\"components\">\n         <str>query</str>\n         <str>facet</str>\n         <str>mlt</str>\n         <str>highlight</str>\n         <str>stats</str>\n         <str>debug</str>\n       </arr>\n\n       If you register a searchComponent to one of the standard names,\n       that will be used instead of the default.\n\n       To insert components before or after the 'standard' components, use:\n\n       <arr name=\"first-components\">\n         <str>myFirstComponentName</str>\n       
</arr>\n\n       <arr name=\"last-components\">\n         <str>myLastComponentName</str>\n       </arr>\n\n       NOTE: The component registered with the name \"debug\" will\n       always be executed after the \"last-components\"\n\n     -->\n\n  <!-- Spell Check\n\n       The spell check component can return a list of alternative spelling\n       suggestions.\n\n       http://wiki.apache.org/solr/SpellCheckComponent\n    -->\n  <searchComponent name=\"spellcheck\" class=\"solr.SpellCheckComponent\">\n\n    <str name=\"queryAnalyzerFieldType\">text_en</str>\n\n    <!-- Multiple \"Spell Checkers\" can be declared and used by this\n         component\n      -->\n\n    <!-- a spellchecker built from a field of the main index -->\n    <lst name=\"spellchecker\">\n      <str name=\"name\">default</str>\n      <str name=\"field\">{{ content_field_name }}</str>\n      <str name=\"classname\">solr.DirectSolrSpellChecker</str>\n      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->\n      <str name=\"distanceMeasure\">internal</str>\n      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->\n      <float name=\"accuracy\">0.5</float>\n      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->\n      <int name=\"maxEdits\">2</int>\n      <!-- the minimum shared prefix when enumerating terms -->\n      <int name=\"minPrefix\">1</int>\n      <!-- maximum number of inspections per result. 
-->\n      <int name=\"maxInspections\">5</int>\n      <!-- minimum length of a query term to be considered for correction -->\n      <int name=\"minQueryLength\">4</int>\n      <!-- maximum threshold of documents a query term can appear to be considered for correction -->\n      <float name=\"maxQueryFrequency\">0.01</float>\n      <!-- uncomment this to require suggestions to occur in 1% of the documents\n        <float name=\"thresholdTokenFrequency\">.01</float>\n      -->\n    </lst>\n\n    <!-- a spellchecker that can break or combine words.  See \"/spell\" handler below for usage -->\n    <!--\n    <lst name=\"spellchecker\">\n      <str name=\"name\">wordbreak</str>\n      <str name=\"classname\">solr.WordBreakSolrSpellChecker</str>\n      <str name=\"field\">name</str>\n      <str name=\"combineWords\">true</str>\n      <str name=\"breakWords\">true</str>\n      <int name=\"maxChanges\">10</int>\n    </lst>\n    -->\n  </searchComponent>\n\n  <!-- A request handler for demonstrating the spellcheck component.\n\n       NOTE: This is purely as an example.  
The whole purpose of the\n       SpellCheckComponent is to hook it into the request handler that\n       handles your normal user queries so that a separate request is\n       not needed to get suggestions.\n\n       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS\n       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!\n\n       See http://wiki.apache.org/solr/SpellCheckComponent for details\n       on the request parameters.\n    -->\n  <requestHandler name=\"/spell\" class=\"solr.SearchHandler\" startup=\"lazy\">\n    <lst name=\"defaults\">\n      <!-- Solr will use suggestions from both the 'default' spellchecker\n           and from the 'wordbreak' spellchecker and combine them.\n           collations (re-written queries) can include a combination of\n           corrections from both spellcheckers -->\n      <str name=\"spellcheck.dictionary\">default</str>\n      <str name=\"spellcheck\">on</str>\n      <str name=\"spellcheck.extendedResults\">true</str>\n      <str name=\"spellcheck.count\">10</str>\n      <str name=\"spellcheck.alternativeTermCount\">5</str>\n      <str name=\"spellcheck.maxResultsForSuggest\">5</str>\n      <str name=\"spellcheck.collate\">true</str>\n      <str name=\"spellcheck.collateExtendedResults\">true</str>\n      <str name=\"spellcheck.maxCollationTries\">10</str>\n      <str name=\"spellcheck.maxCollations\">5</str>\n    </lst>\n    <arr name=\"last-components\">\n      <str>spellcheck</str>\n    </arr>\n  </requestHandler>\n\n  <!-- Term Vector Component\n\n       http://wiki.apache.org/solr/TermVectorComponent\n    -->\n  <searchComponent name=\"tvComponent\" class=\"solr.TermVectorComponent\"/>\n\n  <!-- A request handler for demonstrating the term vector component\n\n       This is purely as an example.\n\n       In reality you will likely want to add the component to your\n       already specified request handlers.\n    -->\n  <requestHandler name=\"/tvrh\" class=\"solr.SearchHandler\" startup=\"lazy\">\n    
<lst name=\"defaults\">\n      <bool name=\"tv\">true</bool>\n    </lst>\n    <arr name=\"last-components\">\n      <str>tvComponent</str>\n    </arr>\n  </requestHandler>\n\n  <!-- Clustering Component. (Omitted here. See the default Solr example for a typical configuration.) -->\n\n  <!-- Terms Component\n\n       http://wiki.apache.org/solr/TermsComponent\n\n       A component to return terms and document frequency of those\n       terms\n    -->\n  <searchComponent name=\"terms\" class=\"solr.TermsComponent\"/>\n\n  <!-- A request handler for demonstrating the terms component -->\n  <requestHandler name=\"/terms\" class=\"solr.SearchHandler\" startup=\"lazy\">\n    <lst name=\"defaults\">\n      <bool name=\"terms\">true</bool>\n      <bool name=\"distrib\">false</bool>\n    </lst>\n    <arr name=\"components\">\n      <str>terms</str>\n    </arr>\n  </requestHandler>\n\n\n  <!-- Query Elevation Component\n\n       http://wiki.apache.org/solr/QueryElevationComponent\n\n       a search component that enables you to configure the top\n       results for a given query regardless of the normal lucene\n       scoring.\n    -->\n  <searchComponent name=\"elevator\" class=\"solr.QueryElevationComponent\" >\n    <!-- pick a fieldType to analyze queries -->\n    <str name=\"queryFieldType\">string</str>\n    <str name=\"config-file\">elevate.xml</str>\n  </searchComponent>\n\n  <!-- A request handler for demonstrating the elevator component -->\n  <requestHandler name=\"/elevate\" class=\"solr.SearchHandler\" startup=\"lazy\">\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">explicit</str>\n    </lst>\n    <arr name=\"last-components\">\n      <str>elevator</str>\n    </arr>\n  </requestHandler>\n\n  <!-- Highlighting Component\n\n       http://wiki.apache.org/solr/HighlightingParameters\n    -->\n  <searchComponent class=\"solr.HighlightComponent\" name=\"highlight\">\n    <highlighting>\n      <!-- Configure the standard fragmenter -->\n      <!-- This 
could most likely be commented out in the \"default\" case -->\n      <fragmenter name=\"gap\"\n                  default=\"true\"\n                  class=\"solr.highlight.GapFragmenter\">\n        <lst name=\"defaults\">\n          <int name=\"hl.fragsize\">100</int>\n        </lst>\n      </fragmenter>\n\n      <!-- A regular-expression-based fragmenter\n           (for sentence extraction)\n        -->\n      <fragmenter name=\"regex\"\n                  class=\"solr.highlight.RegexFragmenter\">\n        <lst name=\"defaults\">\n          <!-- slightly smaller fragsizes work better because of slop -->\n          <int name=\"hl.fragsize\">70</int>\n          <!-- allow 50% slop on fragment sizes -->\n          <float name=\"hl.regex.slop\">0.5</float>\n          <!-- a basic sentence pattern -->\n          <str name=\"hl.regex.pattern\">[-\\w ,/\\n\\&quot;&apos;]{20,200}</str>\n        </lst>\n      </fragmenter>\n\n      <!-- Configure the standard formatter -->\n      <formatter name=\"html\"\n                 default=\"true\"\n                 class=\"solr.highlight.HtmlFormatter\">\n        <lst name=\"defaults\">\n          <str name=\"hl.simple.pre\"><![CDATA[<em>]]></str>\n          <str name=\"hl.simple.post\"><![CDATA[</em>]]></str>\n        </lst>\n      </formatter>\n\n      <!-- Configure the standard encoder -->\n      <encoder name=\"html\"\n               class=\"solr.highlight.HtmlEncoder\" />\n\n      <!-- Configure the standard fragListBuilder -->\n      <fragListBuilder name=\"simple\"\n                       class=\"solr.highlight.SimpleFragListBuilder\"/>\n\n      <!-- Configure the single fragListBuilder -->\n      <fragListBuilder name=\"single\"\n                       class=\"solr.highlight.SingleFragListBuilder\"/>\n\n      <!-- Configure the weighted fragListBuilder -->\n      <fragListBuilder name=\"weighted\"\n                       default=\"true\"\n                       class=\"solr.highlight.WeightedFragListBuilder\"/>\n\n      
<!-- default tag FragmentsBuilder -->\n      <fragmentsBuilder name=\"default\"\n                        default=\"true\"\n                        class=\"solr.highlight.ScoreOrderFragmentsBuilder\">\n        <!--\n        <lst name=\"defaults\">\n          <str name=\"hl.multiValuedSeparatorChar\">/</str>\n        </lst>\n        -->\n      </fragmentsBuilder>\n\n      <!-- multi-colored tag FragmentsBuilder -->\n      <fragmentsBuilder name=\"colored\"\n                        class=\"solr.highlight.ScoreOrderFragmentsBuilder\">\n        <lst name=\"defaults\">\n          <str name=\"hl.tag.pre\"><![CDATA[\n               <b style=\"background:yellow\">,<b style=\"background:lawngreen\">,\n               <b style=\"background:aquamarine\">,<b style=\"background:magenta\">,\n               <b style=\"background:palegreen\">,<b style=\"background:coral\">,\n               <b style=\"background:wheat\">,<b style=\"background:khaki\">,\n               <b style=\"background:lime\">,<b style=\"background:deepskyblue\">]]></str>\n          <str name=\"hl.tag.post\"><![CDATA[</b>]]></str>\n        </lst>\n      </fragmentsBuilder>\n\n      <boundaryScanner name=\"default\"\n                       default=\"true\"\n                       class=\"solr.highlight.SimpleBoundaryScanner\">\n        <lst name=\"defaults\">\n          <str name=\"hl.bs.maxScan\">10</str>\n          <str name=\"hl.bs.chars\">.,!? &#9;&#10;&#13;</str>\n        </lst>\n      </boundaryScanner>\n\n      <boundaryScanner name=\"breakIterator\"\n                       class=\"solr.highlight.BreakIteratorBoundaryScanner\">\n        <lst name=\"defaults\">\n          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->\n          <str name=\"hl.bs.type\">WORD</str>\n          <!-- language and country are used when constructing Locale object.  
-->\n          <!-- And the Locale object will be used when getting instance of BreakIterator -->\n          <str name=\"hl.bs.language\">en</str>\n          <str name=\"hl.bs.country\">US</str>\n        </lst>\n      </boundaryScanner>\n    </highlighting>\n  </searchComponent>\n\n  <!-- Update Processors\n\n       Chains of Update Processor Factories for dealing with Update\n       Requests can be declared, and then used by name in Update\n       Request Processors\n\n       http://wiki.apache.org/solr/UpdateRequestProcessor\n\n    -->\n\n  <!-- Add unknown fields to the schema\n\n       An example field type guessing update processor that will\n       attempt to parse string-typed field values as Booleans, Longs,\n       Doubles, or Dates, and then add schema fields with the guessed\n       field types.\n\n       This requires that the schema is both managed and mutable, by\n       declaring schemaFactory as ManagedIndexSchemaFactory, with\n       mutable specified as true.\n\n       See http://wiki.apache.org/solr/GuessingFieldTypes\n    -->\n  <updateRequestProcessorChain name=\"add-unknown-fields-to-the-schema\">\n    <!-- UUIDUpdateProcessorFactory will generate an id if none is present in the incoming document -->\n    <processor class=\"solr.UUIDUpdateProcessorFactory\" />\n    <processor class=\"solr.RemoveBlankFieldUpdateProcessorFactory\"/>\n    <processor class=\"solr.FieldNameMutatingUpdateProcessorFactory\">\n      <str name=\"pattern\">[^\\w-\\.]</str>\n      <str name=\"replacement\">_</str>\n    </processor>\n    <processor class=\"solr.ParseBooleanFieldUpdateProcessorFactory\"/>\n    <processor class=\"solr.ParseLongFieldUpdateProcessorFactory\"/>\n    <processor class=\"solr.ParseDoubleFieldUpdateProcessorFactory\"/>\n    <processor class=\"solr.ParseDateFieldUpdateProcessorFactory\">\n      <arr name=\"format\">\n        <str>yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>\n        <str>yyyy-MM-dd'T'HH:mm:ss,SSSZ</str>\n        
<str>yyyy-MM-dd'T'HH:mm:ss.SSS</str>\n        <str>yyyy-MM-dd'T'HH:mm:ss,SSS</str>\n        <str>yyyy-MM-dd'T'HH:mm:ssZ</str>\n        <str>yyyy-MM-dd'T'HH:mm:ss</str>\n        <str>yyyy-MM-dd'T'HH:mmZ</str>\n        <str>yyyy-MM-dd'T'HH:mm</str>\n        <str>yyyy-MM-dd HH:mm:ss.SSSZ</str>\n        <str>yyyy-MM-dd HH:mm:ss,SSSZ</str>\n        <str>yyyy-MM-dd HH:mm:ss.SSS</str>\n        <str>yyyy-MM-dd HH:mm:ss,SSS</str>\n        <str>yyyy-MM-dd HH:mm:ssZ</str>\n        <str>yyyy-MM-dd HH:mm:ss</str>\n        <str>yyyy-MM-dd HH:mmZ</str>\n        <str>yyyy-MM-dd HH:mm</str>\n        <str>yyyy-MM-dd</str>\n      </arr>\n    </processor>\n    <!--<processor class=\"solr.AddSchemaFieldsUpdateProcessorFactory\">\n      <str name=\"defaultFieldType\">strings</str>\n      <lst name=\"typeMapping\">\n        <str name=\"valueClass\">java.lang.Boolean</str>\n        <str name=\"fieldType\">booleans</str>\n      </lst>\n      <lst name=\"typeMapping\">\n        <str name=\"valueClass\">java.util.Date</str>\n        <str name=\"fieldType\">tdates</str>\n      </lst>\n      <lst name=\"typeMapping\">\n        <str name=\"valueClass\">java.lang.Long</str>\n        <str name=\"valueClass\">java.lang.Integer</str>\n        <str name=\"fieldType\">tlongs</str>\n      </lst>\n      <lst name=\"typeMapping\">\n        <str name=\"valueClass\">java.lang.Number</str>\n        <str name=\"fieldType\">tdoubles</str>\n      </lst>\n    </processor>-->\n    <processor class=\"solr.LogUpdateProcessorFactory\"/>\n    <processor class=\"solr.DistributedUpdateProcessorFactory\"/>\n    <processor class=\"solr.RunUpdateProcessorFactory\"/>\n  </updateRequestProcessorChain>\n\n  <!-- Deduplication\n\n       An example dedup update processor that creates the \"id\" field\n       on the fly based on the hash code of some other fields.  
This\n       example has overwriteDupes set to false since we are using the\n       id field as the signatureField and Solr will maintain\n       uniqueness based on that anyway.\n\n    -->\n  <!--\n     <updateRequestProcessorChain name=\"dedupe\">\n       <processor class=\"solr.processor.SignatureUpdateProcessorFactory\">\n         <bool name=\"enabled\">true</bool>\n         <str name=\"signatureField\">id</str>\n         <bool name=\"overwriteDupes\">false</bool>\n         <str name=\"fields\">name,features,cat</str>\n         <str name=\"signatureClass\">solr.processor.Lookup3Signature</str>\n       </processor>\n       <processor class=\"solr.LogUpdateProcessorFactory\" />\n       <processor class=\"solr.RunUpdateProcessorFactory\" />\n     </updateRequestProcessorChain>\n    -->\n\n  <!-- Language identification\n\n       This example update chain identifies the language of the incoming\n       documents using the langid contrib. The detected language is\n       written to field language_s. 
No field name mapping is done.\n       The fields used for detection are text, title, subject and description,\n       making this example suitable for detecting languages from full-text\n       rich documents injected via ExtractingRequestHandler.\n       See more about langId at http://wiki.apache.org/solr/LanguageDetection\n    -->\n  <!--\n   <updateRequestProcessorChain name=\"langid\">\n     <processor class=\"org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory\">\n       <str name=\"langid.fl\">text,title,subject,description</str>\n       <str name=\"langid.langField\">language_s</str>\n       <str name=\"langid.fallback\">en</str>\n     </processor>\n     <processor class=\"solr.LogUpdateProcessorFactory\" />\n     <processor class=\"solr.RunUpdateProcessorFactory\" />\n   </updateRequestProcessorChain>\n  -->\n\n  <!-- Script update processor\n\n    This example hooks in an update processor implemented using JavaScript.\n\n    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor\n  -->\n  <!--\n    <updateRequestProcessorChain name=\"script\">\n      <processor class=\"solr.StatelessScriptUpdateProcessorFactory\">\n        <str name=\"script\">update-script.js</str>\n        <lst name=\"params\">\n          <str name=\"config_param\">example config parameter</str>\n        </lst>\n      </processor>\n      <processor class=\"solr.RunUpdateProcessorFactory\" />\n    </updateRequestProcessorChain>\n  -->\n\n  <!-- Response Writers\n\n       http://wiki.apache.org/solr/QueryResponseWriter\n\n       Request responses will be written using the writer specified by\n       the 'wt' request parameter matching the name of a registered\n       writer.\n\n       The \"default\" writer is the default and will be used if 'wt' is\n       not specified in the request.\n    -->\n  <!-- The following response writers are implicitly configured unless\n       overridden...\n    -->\n  <!--\n     
<queryResponseWriter name=\"xml\"\n                          default=\"true\"\n                          class=\"solr.XMLResponseWriter\" />\n     <queryResponseWriter name=\"json\" class=\"solr.JSONResponseWriter\"/>\n     <queryResponseWriter name=\"python\" class=\"solr.PythonResponseWriter\"/>\n     <queryResponseWriter name=\"ruby\" class=\"solr.RubyResponseWriter\"/>\n     <queryResponseWriter name=\"php\" class=\"solr.PHPResponseWriter\"/>\n     <queryResponseWriter name=\"phps\" class=\"solr.PHPSerializedResponseWriter\"/>\n     <queryResponseWriter name=\"csv\" class=\"solr.CSVResponseWriter\"/>\n     <queryResponseWriter name=\"schema.xml\" class=\"solr.SchemaXmlResponseWriter\"/>\n    -->\n\n  <queryResponseWriter name=\"json\" class=\"solr.JSONResponseWriter\">\n    <!-- For the purposes of the tutorial, JSON responses are written as\n     plain text so that they are easy to read in *any* browser.\n     If you expect a MIME type of \"application/json\" just remove this override.\n    -->\n    <str name=\"content-type\">text/plain; charset=UTF-8</str>\n  </queryResponseWriter>\n\n  <!--\n     Custom response writers can be declared as needed...\n    -->\n  <queryResponseWriter name=\"velocity\" class=\"solr.VelocityResponseWriter\" startup=\"lazy\">\n    <str name=\"template.base.dir\">${velocity.template.base.dir:}</str>\n    <str name=\"solr.resource.loader.enabled\">${velocity.solr.resource.loader.enabled:true}</str>\n    <str name=\"params.resource.loader.enabled\">${velocity.params.resource.loader.enabled:false}</str>\n  </queryResponseWriter>\n\n  <!-- XSLT response writer transforms the XML output by any xslt file found\n       in Solr's conf/xslt directory.  
Changes to xslt files are checked for\n       every xsltCacheLifetimeSeconds.\n    -->\n  <queryResponseWriter name=\"xslt\" class=\"solr.XSLTResponseWriter\">\n    <int name=\"xsltCacheLifetimeSeconds\">5</int>\n  </queryResponseWriter>\n\n  <!-- Query Parsers\n\n       https://cwiki.apache.org/confluence/display/solr/Query+Syntax+and+Parsing\n\n       Multiple QParserPlugins can be registered by name, and then\n       used in either the \"defType\" param for the QueryComponent (used\n       by SearchHandler) or in LocalParams\n    -->\n  <!-- example of registering a query parser -->\n  <!--\n     <queryParser name=\"myparser\" class=\"com.mycompany.MyQParserPlugin\"/>\n    -->\n\n  <!-- Function Parsers\n\n       http://wiki.apache.org/solr/FunctionQuery\n\n       Multiple ValueSourceParsers can be registered by name, and then\n       used as function names when using the \"func\" QParser.\n    -->\n  <!-- example of registering a custom function parser  -->\n  <!--\n     <valueSourceParser name=\"myfunc\"\n                        class=\"com.mycompany.MyValueSourceParser\" />\n    -->\n\n\n  <!-- Document Transformers\n       http://wiki.apache.org/solr/DocTransformers\n    -->\n  <!--\n     Could be something like:\n     <transformer name=\"db\" class=\"com.mycompany.LoadFromDatabaseTransformer\" >\n       <int name=\"connection\">jdbc://....</int>\n     </transformer>\n\n     To add a constant value to all docs, use:\n     <transformer name=\"mytrans2\" class=\"org.apache.solr.response.transform.ValueAugmenterFactory\" >\n       <int name=\"value\">5</int>\n     </transformer>\n\n     If you want the user to still be able to change it with _value:something_ use this:\n     <transformer name=\"mytrans3\" class=\"org.apache.solr.response.transform.ValueAugmenterFactory\" >\n       <double name=\"defaultValue\">5</double>\n     </transformer>\n\n      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  
The\n      EditorialMarkerFactory will do exactly that:\n     <transformer name=\"qecBooster\" class=\"org.apache.solr.response.transform.EditorialMarkerFactory\" />\n    -->\n\n    <!-- Extras noted by users of DJangoHaystack -->\n    <requestHandler name=\"/analysis/field\"\n                  startup=\"lazy\"\n                  class=\"solr.FieldAnalysisRequestHandler\" />\n    <requestHandler name=\"/analysis/document\"\n                  class=\"solr.DocumentAnalysisRequestHandler\"\n                  startup=\"lazy\" />\n</config>\n"
  },
  {
    "path": "haystack/templatetags/__init__.py",
    "content": ""
  },
  {
    "path": "haystack/templatetags/highlight.py",
    "content": "from django import template\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom haystack.utils import importlib\n\nregister = template.Library()\n\n\nclass HighlightNode(template.Node):\n    def __init__(\n        self, text_block, query, html_tag=None, css_class=None, max_length=None\n    ):\n        self.text_block = template.Variable(text_block)\n        self.query = template.Variable(query)\n        self.html_tag = html_tag\n        self.css_class = css_class\n        self.max_length = max_length\n\n        if html_tag is not None:\n            self.html_tag = template.Variable(html_tag)\n\n        if css_class is not None:\n            self.css_class = template.Variable(css_class)\n\n        if max_length is not None:\n            self.max_length = template.Variable(max_length)\n\n    def render(self, context):\n        text_block = self.text_block.resolve(context)\n        query = self.query.resolve(context)\n        kwargs = {}\n\n        if self.html_tag is not None:\n            kwargs[\"html_tag\"] = self.html_tag.resolve(context)\n\n        if self.css_class is not None:\n            kwargs[\"css_class\"] = self.css_class.resolve(context)\n\n        if self.max_length is not None:\n            kwargs[\"max_length\"] = self.max_length.resolve(context)\n\n        # Handle a user-defined highlighting function.\n        if (\n            hasattr(settings, \"HAYSTACK_CUSTOM_HIGHLIGHTER\")\n            and settings.HAYSTACK_CUSTOM_HIGHLIGHTER\n        ):\n            # Do the import dance.\n            try:\n                path_bits = settings.HAYSTACK_CUSTOM_HIGHLIGHTER.split(\".\")\n                highlighter_path, highlighter_classname = (\n                    \".\".join(path_bits[:-1]),\n                    path_bits[-1],\n                )\n                highlighter_module = importlib.import_module(highlighter_path)\n                highlighter_class = getattr(highlighter_module, 
highlighter_classname)\n            except (ImportError, AttributeError) as e:\n                raise ImproperlyConfigured(\n                    \"The highlighter '%s' could not be imported: %s\"\n                    % (settings.HAYSTACK_CUSTOM_HIGHLIGHTER, e)\n                )\n        else:\n            from haystack.utils.highlighting import Highlighter\n\n            highlighter_class = Highlighter\n\n        highlighter = highlighter_class(query, **kwargs)\n        highlighted_text = highlighter.highlight(text_block)\n        return highlighted_text\n\n\n@register.tag\ndef highlight(parser, token):\n    \"\"\"\n    Takes a block of text and highlights words from a provided query within that\n    block of text. Optionally accepts arguments to provide the HTML tag to wrap\n    highlighted word in, a CSS class to use with the tag and a maximum length of\n    the blurb in characters.\n\n    Syntax::\n\n        {% highlight <text_block> with <query> [css_class \"class_name\"] [html_tag \"span\"] [max_length 200] %}\n\n    Example::\n\n        # Highlight summary with default behavior.\n        {% highlight result.summary with request.query %}\n\n        # Highlight summary but wrap highlighted words with a div and the\n        # following CSS class.\n        {% highlight result.summary with request.query html_tag \"div\" css_class \"highlight_me_please\" %}\n\n        # Highlight summary but only show 40 characters.\n        {% highlight result.summary with request.query max_length 40 %}\n    \"\"\"\n    bits = token.split_contents()\n    tag_name = bits[0]\n\n    if not len(bits) % 2 == 0:\n        raise template.TemplateSyntaxError(\n            \"'%s' tag requires valid pairings arguments.\" % tag_name\n        )\n\n    text_block = bits[1]\n\n    if len(bits) < 4:\n        raise template.TemplateSyntaxError(\n            \"'%s' tag requires an object and a query provided by 'with'.\" % tag_name\n        )\n\n    if bits[2] != \"with\":\n        raise 
template.TemplateSyntaxError(\n            \"'%s' tag's second argument should be 'with'.\" % tag_name\n        )\n\n    query = bits[3]\n\n    arg_bits = iter(bits[4:])\n    kwargs = {}\n\n    for bit in arg_bits:\n        if bit == \"css_class\":\n            kwargs[\"css_class\"] = next(arg_bits)\n\n        if bit == \"html_tag\":\n            kwargs[\"html_tag\"] = next(arg_bits)\n\n        if bit == \"max_length\":\n            kwargs[\"max_length\"] = next(arg_bits)\n\n    return HighlightNode(text_block, query, **kwargs)\n"
  },
  {
    "path": "haystack/templatetags/more_like_this.py",
    "content": "import logging\n\nfrom django import template\n\nfrom haystack.query import SearchQuerySet\nfrom haystack.utils.app_loading import haystack_get_model\n\nregister = template.Library()\n\n\nclass MoreLikeThisNode(template.Node):\n    def __init__(self, model, varname, for_types=None, limit=None):\n        self.model = template.Variable(model)\n        self.varname = varname\n        self.for_types = for_types\n        self.limit = limit\n\n        if self.limit is not None:\n            self.limit = int(self.limit)\n\n    def render(self, context):\n        try:\n            model_instance = self.model.resolve(context)\n            sqs = SearchQuerySet()\n\n            if self.for_types is not None:\n                intermediate = template.Variable(self.for_types)\n                for_types = intermediate.resolve(context).split(\",\")\n                search_models = []\n\n                for model in for_types:\n                    model_class = haystack_get_model(*model.split(\".\"))\n\n                    if model_class:\n                        search_models.append(model_class)\n\n                sqs = sqs.models(*search_models)\n\n            sqs = sqs.more_like_this(model_instance)\n\n            if self.limit is not None:\n                sqs = sqs[: self.limit]\n\n            context[self.varname] = sqs\n        except Exception as exc:\n            logging.warning(\n                \"Unhandled exception rendering %r: %s\", self, exc, exc_info=True\n            )\n\n        return \"\"\n\n\n@register.tag\ndef more_like_this(parser, token):\n    \"\"\"\n    Fetches similar items from the search index to find content that is similar\n    to the provided model's content.\n\n    Syntax::\n\n        {% more_like_this model_instance as varname [for app_label.model_name,app_label.model_name,...] 
[limit n] %}\n\n    Example::\n\n        # Pull a full SearchQuerySet (lazy loaded) of similar content.\n        {% more_like_this entry as related_content %}\n\n        # Pull just the top 5 similar pieces of content.\n        {% more_like_this entry as related_content limit 5  %}\n\n        # Pull just the top 5 similar entries or comments.\n        {% more_like_this entry as related_content for \"blog.entry,comments.comment\" limit 5  %}\n    \"\"\"\n    bits = token.split_contents()\n\n    if not len(bits) in (4, 6, 8):\n        raise template.TemplateSyntaxError(\n            \"'%s' tag requires either 3, 5 or 7 arguments.\" % bits[0]\n        )\n\n    model = bits[1]\n\n    if bits[2] != \"as\":\n        raise template.TemplateSyntaxError(\n            \"'%s' tag's second argument should be 'as'.\" % bits[0]\n        )\n\n    varname = bits[3]\n    limit = None\n    for_types = None\n\n    if len(bits) == 6:\n        if bits[4] != \"limit\" and bits[4] != \"for\":\n            raise template.TemplateSyntaxError(\n                \"'%s' tag's fourth argument should be either 'limit' or 'for'.\"\n                % bits[0]\n            )\n\n        if bits[4] == \"limit\":\n            limit = bits[5]\n        else:\n            for_types = bits[5]\n\n    if len(bits) == 8:\n        if bits[4] != \"for\":\n            raise template.TemplateSyntaxError(\n                \"'%s' tag's fourth argument should be 'for'.\" % bits[0]\n            )\n\n        for_types = bits[5]\n\n        if bits[6] != \"limit\":\n            raise template.TemplateSyntaxError(\n                \"'%s' tag's sixth argument should be 'limit'.\" % bits[0]\n            )\n\n        limit = bits[7]\n\n    return MoreLikeThisNode(model, varname, for_types, limit)\n"
  },
  {
    "path": "haystack/urls.py",
    "content": "from django.urls import path\n\nfrom haystack.views import SearchView\n\nurlpatterns = [path(\"\", SearchView(), name=\"haystack_search\")]\n"
  },
  {
    "path": "haystack/utils/__init__.py",
    "content": "import importlib\nimport re\n\nfrom django.conf import settings\n\nfrom haystack.constants import DJANGO_CT, DJANGO_ID, ID\n\nIDENTIFIER_REGEX = re.compile(r\"^[\\w\\d_]+\\.[\\w\\d_]+\\.[\\w\\d-]+$\")\n\n\ndef default_get_identifier(obj_or_string):\n    \"\"\"\n    Get an unique identifier for the object or a string representing the\n    object.\n\n    If not overridden, uses <app_label>.<object_name>.<pk>.\n    \"\"\"\n    if isinstance(obj_or_string, str):\n        if not IDENTIFIER_REGEX.match(obj_or_string):\n            raise AttributeError(\n                \"Provided string '%s' is not a valid identifier.\" % obj_or_string\n            )\n\n        return obj_or_string\n\n    return \"%s.%s\" % (get_model_ct(obj_or_string), obj_or_string._get_pk_val())\n\n\ndef _lookup_identifier_method():\n    \"\"\"\n    If the user has set HAYSTACK_IDENTIFIER_METHOD, import it and return the method uncalled.\n    If HAYSTACK_IDENTIFIER_METHOD is not defined, return haystack.utils.default_get_identifier.\n\n    This always runs at module import time.  
We keep the code in a function\n    so that it can be called from unit tests, in order to simulate the re-loading\n    of this module.\n    \"\"\"\n    if not hasattr(settings, \"HAYSTACK_IDENTIFIER_METHOD\"):\n        return default_get_identifier\n\n    module_path, method_name = settings.HAYSTACK_IDENTIFIER_METHOD.rsplit(\".\", 1)\n\n    try:\n        module = importlib.import_module(module_path)\n    except ImportError:\n        raise ImportError(\n            \"Unable to import module '%s' provided for HAYSTACK_IDENTIFIER_METHOD.\"\n            % module_path\n        )\n\n    identifier_method = getattr(module, method_name, None)\n\n    if not identifier_method:\n        raise AttributeError(\n            \"Provided method '%s' for HAYSTACK_IDENTIFIER_METHOD does not exist in '%s'.\"\n            % (method_name, module_path)\n        )\n\n    return identifier_method\n\n\nget_identifier = _lookup_identifier_method()\n\n\ndef get_model_ct_tuple(model):\n    # Deferred models should be identified as if they were the underlying model.\n    model_name = (\n        model._meta.concrete_model._meta.model_name\n        if hasattr(model, \"_deferred\") and model._deferred\n        else model._meta.model_name\n    )\n    return (model._meta.app_label, model_name)\n\n\ndef get_model_ct(model):\n    return \"%s.%s\" % get_model_ct_tuple(model)\n\n\ndef get_facet_field_name(fieldname):\n    if fieldname in [ID, DJANGO_ID, DJANGO_CT]:\n        return fieldname\n\n    return \"%s_exact\" % fieldname\n"
  },
  {
    "path": "haystack/utils/app_loading.py",
    "content": "from django.apps import apps\nfrom django.core.exceptions import ImproperlyConfigured\n\n__all__ = [\"haystack_get_models\", \"haystack_load_apps\"]\n\nAPP = \"app\"\nMODEL = \"model\"\n\n\ndef haystack_get_app_modules():\n    \"\"\"Return the Python module for each installed app\"\"\"\n    return [i.module for i in apps.get_app_configs()]\n\n\ndef haystack_load_apps():\n    \"\"\"Return a list of app labels for all installed applications which have models\"\"\"\n    return [i.label for i in apps.get_app_configs() if i.models_module is not None]\n\n\ndef haystack_get_models(label):\n    try:\n        app_mod = apps.get_app_config(label)\n        return app_mod.get_models()\n    except LookupError:\n        if \".\" not in label:\n            raise ImproperlyConfigured(\"Unknown application label {}\".format(label))\n        app_label, model_name = label.rsplit(\".\", 1)\n        return [apps.get_model(app_label, model_name)]\n    except ImproperlyConfigured:\n        pass\n\n\ndef haystack_get_model(app_label, model_name):\n    return apps.get_model(app_label, model_name)\n"
  },
  {
    "path": "haystack/utils/geo.py",
    "content": "from haystack.constants import WGS_84_SRID\nfrom haystack.exceptions import SpatialError\n\n\ndef ensure_geometry(geom):\n    \"\"\"\n    Makes sure the parameter passed in looks like a GEOS ``GEOSGeometry``.\n    \"\"\"\n    if not hasattr(geom, \"geom_type\"):\n        raise SpatialError(\"Point '%s' doesn't appear to be a GEOS geometry.\" % geom)\n\n    return geom\n\n\ndef ensure_point(geom):\n    \"\"\"\n    Makes sure the parameter passed in looks like a GEOS ``Point``.\n    \"\"\"\n    ensure_geometry(geom)\n\n    if geom.geom_type != \"Point\":\n        raise SpatialError(\"Provided geometry '%s' is not a 'Point'.\" % geom)\n\n    return geom\n\n\ndef ensure_wgs84(point):\n    \"\"\"\n    Ensures the point passed in is a GEOS ``Point`` & returns that point's\n    data is in the WGS-84 spatial reference.\n    \"\"\"\n    ensure_point(point)\n    # Clone it so we don't alter the original, in case they're using it for\n    # something else.\n    new_point = point.clone()\n\n    if not new_point.srid:\n        # It has no spatial reference id. Assume WGS-84.\n        new_point.srid = WGS_84_SRID\n    elif new_point.srid != WGS_84_SRID:\n        # Transform it to get to the right system.\n        new_point.transform(WGS_84_SRID)\n\n    return new_point\n\n\ndef ensure_distance(dist):\n    \"\"\"\n    Makes sure the parameter passed in is a 'Distance' object.\n    \"\"\"\n    try:\n        # Since we mostly only care about the ``.km`` attribute, make sure\n        # it's there.\n        dist.km\n    except AttributeError:\n        raise SpatialError(\"'%s' does not appear to be a 'Distance' object.\" % dist)\n\n    return dist\n\n\ndef generate_bounding_box(bottom_left, top_right):\n    \"\"\"\n    Takes two opposite corners of a bounding box (order matters!) 
& generates\n    a two-tuple of the correct coordinates for the bounding box.\n\n    The two-tuple is in the form ``((min_lat, min_lng), (max_lat, max_lng))``.\n    \"\"\"\n    west, lat_1 = bottom_left.coords\n    east, lat_2 = top_right.coords\n    min_lat, max_lat = min(lat_1, lat_2), max(lat_1, lat_2)\n    return ((min_lat, west), (max_lat, east))\n"
  },
  {
    "path": "haystack/utils/highlighting.py",
    "content": "from django.utils.html import strip_tags\n\n\nclass Highlighter(object):\n    css_class = \"highlighted\"\n    html_tag = \"span\"\n    max_length = 200\n    text_block = \"\"\n\n    def __init__(self, query, **kwargs):\n        self.query = query\n\n        if \"max_length\" in kwargs:\n            self.max_length = int(kwargs[\"max_length\"])\n\n        if \"html_tag\" in kwargs:\n            self.html_tag = kwargs[\"html_tag\"]\n\n        if \"css_class\" in kwargs:\n            self.css_class = kwargs[\"css_class\"]\n\n        self.query_words = {\n            word.lower() for word in self.query.split() if not word.startswith(\"-\")\n        }\n\n    def highlight(self, text_block):\n        self.text_block = strip_tags(text_block)\n        highlight_locations = self.find_highlightable_words()\n        start_offset, end_offset = self.find_window(highlight_locations)\n        return self.render_html(highlight_locations, start_offset, end_offset)\n\n    def find_highlightable_words(self):\n        # Use a set so we only do this once per unique word.\n        word_positions = {}\n\n        # Pre-compute the length.\n        end_offset = len(self.text_block)\n        lower_text_block = self.text_block.lower()\n\n        for word in self.query_words:\n            if word not in word_positions:\n                word_positions[word] = []\n\n            start_offset = 0\n\n            while start_offset < end_offset:\n                next_offset = lower_text_block.find(word, start_offset, end_offset)\n\n                # If we get a -1 out of find, it wasn't found. 
Bomb out and\n                # start the next word.\n                if next_offset == -1:\n                    break\n\n                word_positions[word].append(next_offset)\n                start_offset = next_offset + len(word)\n\n        return word_positions\n\n    def find_window(self, highlight_locations):\n        best_start = 0\n        best_end = self.max_length\n\n        # First, make sure we have words.\n        if not len(highlight_locations):\n            return (best_start, best_end)\n\n        words_found = []\n\n        # Next, make sure we found any words at all.\n        for _, offset_list in highlight_locations.items():\n            if len(offset_list):\n                # Add all of the locations to the list.\n                words_found.extend(offset_list)\n\n        if not len(words_found):\n            return (best_start, best_end)\n\n        if len(words_found) == 1:\n            return (words_found[0], words_found[0] + self.max_length)\n\n        # Sort the list so it's in ascending order.\n        words_found = sorted(words_found)\n\n        # We now have a denormalized list of all positions were a word was\n        # found. 
We'll iterate through and find the densest window we can by\n        # counting the number of found offsets (-1 to fit in the window).\n        highest_density = 0\n\n        if words_found[:-1][0] > self.max_length:\n            best_start = words_found[:-1][0]\n            best_end = best_start + self.max_length\n\n        for count, start in enumerate(words_found[:-1]):\n            current_density = 1\n\n            for end in words_found[count + 1 :]:\n                if end - start < self.max_length:\n                    current_density += 1\n                else:\n                    current_density = 0\n\n                # Only replace if we have a bigger (not equal density) so we\n                # give deference to windows earlier in the document.\n                if current_density > highest_density:\n                    best_start = start\n                    best_end = start + self.max_length\n                    highest_density = current_density\n\n        return (best_start, best_end)\n\n    def render_html(self, highlight_locations=None, start_offset=None, end_offset=None):\n        # Start by chopping the block down to the proper window.\n        text = self.text_block[start_offset:end_offset]\n\n        # Invert highlight_locations to a location -> term list\n        term_list = []\n\n        for term, locations in highlight_locations.items():\n            term_list += [(loc - start_offset, term) for loc in locations]\n\n        loc_to_term = sorted(term_list)\n\n        # Prepare the highlight template\n        if self.css_class:\n            hl_start = '<%s class=\"%s\">' % (self.html_tag, self.css_class)\n        else:\n            hl_start = \"<%s>\" % (self.html_tag)\n\n        hl_end = \"</%s>\" % self.html_tag\n\n        # Copy the part from the start of the string to the first match,\n        # and there replace the match with a highlighted version.\n        highlighted_chunk = \"\"\n        matched_so_far = 0\n        prev = 0\n        
prev_str = \"\"\n\n        for cur, cur_str in loc_to_term:\n            # This can be in a different case than cur_str\n            actual_term = text[cur : cur + len(cur_str)]\n\n            # Handle incorrect highlight_locations by first checking for the term\n            if actual_term.lower() == cur_str:\n                if cur < prev + len(prev_str):\n                    continue\n\n                highlighted_chunk += (\n                    text[prev + len(prev_str) : cur] + hl_start + actual_term + hl_end\n                )\n                prev = cur\n                prev_str = cur_str\n\n                # Keep track of how far we've copied so far, for the last step\n                matched_so_far = cur + len(actual_term)\n\n        # Don't forget the chunk after the last term\n        highlighted_chunk += text[matched_so_far:]\n\n        if start_offset > 0:\n            highlighted_chunk = \"...%s\" % highlighted_chunk\n\n        if end_offset < len(self.text_block):\n            highlighted_chunk = \"%s...\" % highlighted_chunk\n\n        return highlighted_chunk\n"
  },
  {
    "path": "haystack/utils/loading.py",
    "content": "import copy\nimport inspect\nimport threading\nimport warnings\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.module_loading import module_has_submodule\n\nfrom haystack import constants\nfrom haystack.exceptions import NotHandled, SearchFieldError\nfrom haystack.utils import importlib\nfrom haystack.utils.app_loading import haystack_get_app_modules\n\n\ndef import_class(path):\n    path_bits = path.split(\".\")\n    # Cut off the class name at the end.\n    class_name = path_bits.pop()\n    module_path = \".\".join(path_bits)\n    module_itself = importlib.import_module(module_path)\n\n    if not hasattr(module_itself, class_name):\n        raise ImportError(\n            \"The Python module '%s' has no '%s' class.\" % (module_path, class_name)\n        )\n\n    return getattr(module_itself, class_name)\n\n\n# Load the search backend.\ndef load_backend(full_backend_path):\n    \"\"\"\n    Loads a backend for interacting with the search engine.\n\n    Requires a ``backend_path``. It should be a string resembling a Python\n    import path, pointing to a ``BaseEngine`` subclass. The built-in options\n    available include::\n\n      * haystack.backends.solr.SolrEngine\n      * haystack.backends.xapian.XapianEngine (third-party)\n      * haystack.backends.whoosh.WhooshEngine\n      * haystack.backends.simple.SimpleEngine\n\n    If you've implemented a custom backend, you can provide the path to\n    your backend & matching ``Engine`` class. 
For example::\n\n      ``myapp.search_backends.CustomSolrEngine``\n\n    \"\"\"\n    path_bits = full_backend_path.split(\".\")\n\n    if len(path_bits) < 2:\n        raise ImproperlyConfigured(\n            \"The provided backend '%s' is not a complete Python path to a BaseEngine subclass.\"\n            % full_backend_path\n        )\n\n    return import_class(full_backend_path)\n\n\ndef load_router(full_router_path):\n    \"\"\"\n    Loads a router for choosing which connection to use.\n\n    Requires a ``full_router_path``. It should be a string resembling a Python\n    import path, pointing to a ``BaseRouter`` subclass. The built-in options\n    available include::\n\n      * haystack.routers.DefaultRouter\n\n    If you've implemented a custom backend, you can provide the path to\n    your backend & matching ``Engine`` class. For example::\n\n      ``myapp.search_routers.MasterSlaveRouter``\n\n    \"\"\"\n    path_bits = full_router_path.split(\".\")\n\n    if len(path_bits) < 2:\n        raise ImproperlyConfigured(\n            \"The provided router '%s' is not a complete Python path to a BaseRouter subclass.\"\n            % full_router_path\n        )\n\n    return import_class(full_router_path)\n\n\nclass ConnectionHandler(object):\n    def __init__(self, connections_info):\n        self.connections_info = connections_info\n        self.thread_local = threading.local()\n        self._index = None\n\n    def ensure_defaults(self, alias):\n        try:\n            conn = self.connections_info[alias]\n        except KeyError:\n            raise ImproperlyConfigured(\n                \"The key '%s' isn't an available connection.\" % alias\n            )\n\n        if not conn.get(\"ENGINE\"):\n            conn[\"ENGINE\"] = \"haystack.backends.simple_backend.SimpleEngine\"\n\n    def __getitem__(self, key):\n        if not hasattr(self.thread_local, \"connections\"):\n            self.thread_local.connections = {}\n        elif key in 
self.thread_local.connections:\n            return self.thread_local.connections[key]\n\n        self.ensure_defaults(key)\n        self.thread_local.connections[key] = load_backend(\n            self.connections_info[key][\"ENGINE\"]\n        )(using=key)\n        return self.thread_local.connections[key]\n\n    def reload(self, key):\n        if not hasattr(self.thread_local, \"connections\"):\n            self.thread_local.connections = {}\n        try:\n            del self.thread_local.connections[key]\n        except KeyError:\n            pass\n\n        return self.__getitem__(key)\n\n    def all(self):  # noqa A003\n        return [self[alias] for alias in self.connections_info]\n\n\nclass ConnectionRouter(object):\n    def __init__(self):\n        self._routers = None\n\n    @property\n    def routers(self):\n        if self._routers is None:\n            default_routers = [\"haystack.routers.DefaultRouter\"]\n            router_list = getattr(settings, \"HAYSTACK_ROUTERS\", default_routers)\n            # in case HAYSTACK_ROUTERS is empty, fallback to default routers\n            if not len(router_list):\n                router_list = default_routers\n\n            self._routers = []\n            for router_path in router_list:\n                router_class = load_router(router_path)\n                self._routers.append(router_class())\n        return self._routers\n\n    def _for_action(self, action, many, **hints):\n        conns = []\n\n        for router in self.routers:\n            if hasattr(router, action):\n                action_callable = getattr(router, action)\n                connection_to_use = action_callable(**hints)\n\n                if connection_to_use is not None:\n                    if isinstance(connection_to_use, str):\n                        conns.append(connection_to_use)\n                    else:\n                        conns.extend(connection_to_use)\n                    if not many:\n                        break\n\n    
    return conns\n\n    def for_write(self, **hints):\n        return self._for_action(\"for_write\", True, **hints)\n\n    def for_read(self, **hints):\n        return self._for_action(\"for_read\", False, **hints)[0]\n\n\nclass UnifiedIndex(object):\n    # Used to collect all the indexes into a cohesive whole.\n    def __init__(self, excluded_indexes=None):\n        self._indexes = {}\n        self.fields = OrderedDict()\n        self._built = False\n        self.excluded_indexes = excluded_indexes or []\n        self.excluded_indexes_ids = {}\n        self.document_field = constants.DOCUMENT_FIELD\n        self._fieldnames = {}\n        self._facet_fieldnames = {}\n\n    @property\n    def indexes(self):\n        warnings.warn(\n            \"'UnifiedIndex.indexes' was deprecated in Haystack v2.3.0. Please use UnifiedIndex.get_indexes().\"\n        )\n        return self._indexes\n\n    def collect_indexes(self):\n        indexes = []\n\n        for app_mod in haystack_get_app_modules():\n            try:\n                search_index_module = importlib.import_module(\n                    \"%s.search_indexes\" % app_mod.__name__\n                )\n            except ImportError:\n                if module_has_submodule(app_mod, \"search_indexes\"):\n                    raise\n\n                continue\n\n            for item_name, item in inspect.getmembers(\n                search_index_module, inspect.isclass\n            ):\n                if getattr(item, \"haystack_use_for_indexing\", False) and getattr(\n                    item, \"get_model\", None\n                ):\n                    # We've got an index. 
Check if we should be ignoring it.\n                    class_path = \"%s.search_indexes.%s\" % (app_mod.__name__, item_name)\n\n                    if (\n                        class_path in self.excluded_indexes\n                        or self.excluded_indexes_ids.get(item_name) == id(item)\n                    ):\n                        self.excluded_indexes_ids[str(item_name)] = id(item)\n                        continue\n\n                    indexes.append(item())\n\n        return indexes\n\n    def reset(self):\n        self._indexes = {}\n        self.fields = OrderedDict()\n        self._built = False\n        self._fieldnames = {}\n        self._facet_fieldnames = {}\n\n    def build(self, indexes=None):\n        self.reset()\n\n        if indexes is None:\n            indexes = self.collect_indexes()\n\n        for index in indexes:\n            model = index.get_model()\n\n            if model in self._indexes:\n                raise ImproperlyConfigured(\n                    \"Model '%s' has more than one 'SearchIndex`` handling it. \"\n                    \"Please exclude either '%s' or '%s' using the 'EXCLUDED_INDEXES' \"\n                    \"setting defined in 'settings.HAYSTACK_CONNECTIONS'.\"\n                    % (model, self._indexes[model], index)\n                )\n\n            self._indexes[model] = index\n            self.collect_fields(index)\n\n        self._built = True\n\n    def collect_fields(self, index):\n        for fieldname, field_object in index.fields.items():\n            if field_object.document is True:\n                if field_object.index_fieldname != self.document_field:\n                    raise SearchFieldError(\n                        \"All 'SearchIndex' classes must use the same '%s' fieldname for the 'document=True' field. 
Offending index is '%s'.\"\n                        % (self.document_field, index)\n                    )\n\n            # Stow the index_fieldname so we don't have to get it the hard way again.\n            if (\n                fieldname in self._fieldnames\n                and field_object.index_fieldname != self._fieldnames[fieldname]\n            ):\n                # We've already seen this field in the list. Raise an exception if index_fieldname differs.\n                raise SearchFieldError(\n                    \"All uses of the '%s' field need to use the same 'index_fieldname' attribute.\"\n                    % fieldname\n                )\n\n            self._fieldnames[fieldname] = field_object.index_fieldname\n\n            # Stow the facet_fieldname so we don't have to look that up either.\n            if hasattr(field_object, \"facet_for\"):\n                if field_object.facet_for:\n                    self._facet_fieldnames[field_object.facet_for] = fieldname\n                else:\n                    self._facet_fieldnames[field_object.instance_name] = fieldname\n\n            # Copy the field in so we've got a unified schema.\n            if field_object.index_fieldname not in self.fields:\n                self.fields[field_object.index_fieldname] = field_object\n                self.fields[field_object.index_fieldname] = copy.copy(field_object)\n            else:\n                # If the field types are different, we can mostly\n                # safely ignore this. 
The exception is ``MultiValueField``,\n                # in which case we'll use it instead, copying over the\n                # values.\n                if field_object.is_multivalued:\n                    old_field = self.fields[field_object.index_fieldname]\n                    self.fields[field_object.index_fieldname] = field_object\n                    self.fields[field_object.index_fieldname] = copy.copy(field_object)\n\n                    # Switch it so we don't have to dupe the remaining\n                    # checks.\n                    field_object = old_field\n\n                # We've already got this field in the list. Ensure that\n                # what we hand back is a superset of all options that\n                # affect the schema.\n                if field_object.indexed is True:\n                    self.fields[field_object.index_fieldname].indexed = True\n\n                if field_object.stored is True:\n                    self.fields[field_object.index_fieldname].stored = True\n\n                if field_object.faceted is True:\n                    self.fields[field_object.index_fieldname].faceted = True\n\n                if field_object.use_template is True:\n                    self.fields[field_object.index_fieldname].use_template = True\n\n                if field_object.null is True:\n                    self.fields[field_object.index_fieldname].null = True\n\n    def get_indexes(self):\n        if not self._built:\n            self.build()\n\n        return self._indexes\n\n    def get_indexed_models(self):\n        # Ensuring a list here since Python3 will give us an iterator\n        return list(self.get_indexes().keys())\n\n    def get_index_fieldname(self, field):\n        if not self._built:\n            self.build()\n\n        return self._fieldnames.get(field) or field\n\n    def get_index(self, model_klass):\n\n        indexes = self.get_indexes()\n\n        if model_klass not in indexes:\n            raise NotHandled(\"The 
model %s is not registered\" % model_klass)\n\n        return indexes[model_klass]\n\n    def get_facet_fieldname(self, field):\n        if not self._built:\n            self.build()\n\n        for fieldname, field_object in self.fields.items():\n            if fieldname != field:\n                continue\n\n            if hasattr(field_object, \"facet_for\"):\n                if field_object.facet_for:\n                    return field_object.facet_for\n                else:\n                    return field_object.instance_name\n            else:\n                return self._facet_fieldnames.get(field) or field\n\n        return field\n\n    def all_searchfields(self):\n        if not self._built:\n            self.build()\n\n        return self.fields\n"
  },
  {
    "path": "haystack/utils/log.py",
    "content": "import logging\n\nfrom django.conf import settings\n\n\ndef getLogger(name):\n    real_logger = logging.getLogger(name)\n    return LoggingFacade(real_logger)\n\n\nclass LoggingFacade(object):\n    def __init__(self, real_logger):\n        self.real_logger = real_logger\n\n    def noop(self, *args, **kwargs):\n        pass\n\n    def __getattr__(self, attr):\n        if getattr(settings, \"HAYSTACK_LOGGING\", True):\n            return getattr(self.real_logger, attr)\n        return self.noop\n"
  },
  {
    "path": "haystack/views.py",
    "content": "from django.conf import settings\nfrom django.core.paginator import InvalidPage, Paginator\nfrom django.http import Http404\nfrom django.shortcuts import render\n\nfrom haystack.forms import FacetedSearchForm, ModelSearchForm\nfrom haystack.query import EmptySearchQuerySet\n\nRESULTS_PER_PAGE = getattr(settings, \"HAYSTACK_SEARCH_RESULTS_PER_PAGE\", 20)\n\n\nclass SearchView(object):\n    template = \"search/search.html\"\n    extra_context = {}\n    query = \"\"\n    results = EmptySearchQuerySet()\n    request = None\n    form = None\n    results_per_page = RESULTS_PER_PAGE\n\n    def __init__(\n        self,\n        template=None,\n        load_all=True,\n        form_class=None,\n        searchqueryset=None,\n        results_per_page=None,\n    ):\n        self.load_all = load_all\n        self.form_class = form_class\n        self.searchqueryset = searchqueryset\n\n        if form_class is None:\n            self.form_class = ModelSearchForm\n\n        if results_per_page is not None:\n            self.results_per_page = results_per_page\n\n        if template:\n            self.template = template\n\n    def __call__(self, request):\n        \"\"\"\n        Generates the actual response to the search.\n\n        Relies on internal, overridable methods to construct the response.\n        \"\"\"\n        self.request = request\n\n        self.form = self.build_form()\n        self.query = self.get_query()\n        self.results = self.get_results()\n\n        return self.create_response()\n\n    def build_form(self, form_kwargs=None):\n        \"\"\"\n        Instantiates the form the class should use to process the search query.\n        \"\"\"\n        data = None\n        kwargs = {\"load_all\": self.load_all}\n        if form_kwargs:\n            kwargs.update(form_kwargs)\n\n        if len(self.request.GET):\n            data = self.request.GET\n\n        if self.searchqueryset is not None:\n            kwargs[\"searchqueryset\"] = 
self.searchqueryset\n\n        return self.form_class(data, **kwargs)\n\n    def get_query(self):\n        \"\"\"\n        Returns the query provided by the user.\n\n        Returns an empty string if the query is invalid.\n        \"\"\"\n        if self.form.is_valid():\n            return self.form.cleaned_data[\"q\"]\n\n        return \"\"\n\n    def get_results(self):\n        \"\"\"\n        Fetches the results via the form.\n\n        Returns an empty list if there's no query to search with.\n        \"\"\"\n        return self.form.search()\n\n    def build_page(self):\n        \"\"\"\n        Paginates the results appropriately.\n\n        In case someone does not want to use Django's built-in pagination, it\n        should be a simple matter to override this method to do what they would\n        like.\n        \"\"\"\n        try:\n            page_no = int(self.request.GET.get(\"page\", 1))\n        except (TypeError, ValueError):\n            raise Http404(\"Not a valid number for page.\")\n\n        if page_no < 1:\n            raise Http404(\"Pages should be 1 or greater.\")\n\n        start_offset = (page_no - 1) * self.results_per_page\n        self.results[start_offset : start_offset + self.results_per_page]\n\n        paginator = Paginator(self.results, self.results_per_page)\n\n        try:\n            page = paginator.page(page_no)\n        except InvalidPage:\n            raise Http404(\"No such page!\")\n\n        return (paginator, page)\n\n    def extra_context(self):\n        \"\"\"\n        Allows the addition of more context variables as needed.\n\n        Must return a dictionary.\n        \"\"\"\n        return {}\n\n    def get_context(self):\n        (paginator, page) = self.build_page()\n\n        context = {\n            \"query\": self.query,\n            \"form\": self.form,\n            \"page\": page,\n            \"paginator\": paginator,\n            \"suggestion\": None,\n        }\n\n        if (\n            
hasattr(self.results, \"query\")\n            and self.results.query.backend.include_spelling\n        ):\n            context[\"suggestion\"] = self.form.get_suggestion()\n\n        context.update(self.extra_context())\n\n        return context\n\n    def create_response(self):\n        \"\"\"\n        Generates the actual HttpResponse to send back to the user.\n        \"\"\"\n\n        context = self.get_context()\n\n        return render(self.request, self.template, context)\n\n\ndef search_view_factory(view_class=SearchView, *args, **kwargs):\n    def search_view(request):\n        return view_class(*args, **kwargs)(request)\n\n    return search_view\n\n\nclass FacetedSearchView(SearchView):\n    def __init__(self, *args, **kwargs):\n        # Needed to switch out the default form class.\n        if kwargs.get(\"form_class\") is None:\n            kwargs[\"form_class\"] = FacetedSearchForm\n\n        super().__init__(*args, **kwargs)\n\n    def build_form(self, form_kwargs=None):\n        if form_kwargs is None:\n            form_kwargs = {}\n\n        # This way the form can always receive a list containing zero or more\n        # facet expressions:\n        form_kwargs[\"selected_facets\"] = self.request.GET.getlist(\"selected_facets\")\n\n        return super().build_form(form_kwargs)\n\n    def extra_context(self):\n        extra = super().extra_context()\n        extra[\"request\"] = self.request\n        extra[\"facets\"] = self.results.facet_counts()\n        return extra\n\n\ndef basic_search(\n    request,\n    template=\"search/search.html\",\n    load_all=True,\n    form_class=ModelSearchForm,\n    searchqueryset=None,\n    extra_context=None,\n    results_per_page=None,\n):\n    \"\"\"\n    A more traditional view that also demonstrate an alternative\n    way to use Haystack.\n\n    Useful as an example of for basing heavily custom views off of.\n\n    Also has the benefit of thread-safety, which the ``SearchView`` class may\n    not be.\n\n    
Template:: ``search/search.html``\n    Context::\n        * form\n          An instance of the ``form_class``. (default: ``ModelSearchForm``)\n        * page\n          The current page of search results.\n        * paginator\n          A paginator instance for the results.\n        * query\n          The query received by the form.\n    \"\"\"\n    query = \"\"\n    results = EmptySearchQuerySet()\n\n    if request.GET.get(\"q\"):\n        form = form_class(request.GET, searchqueryset=searchqueryset, load_all=load_all)\n\n        if form.is_valid():\n            query = form.cleaned_data[\"q\"]\n            results = form.search()\n    else:\n        form = form_class(searchqueryset=searchqueryset, load_all=load_all)\n\n    paginator = Paginator(results, results_per_page or RESULTS_PER_PAGE)\n\n    try:\n        page = paginator.page(int(request.GET.get(\"page\", 1)))\n    except InvalidPage:\n        raise Http404(\"No such page of results!\")\n\n    context = {\n        \"form\": form,\n        \"page\": page,\n        \"paginator\": paginator,\n        \"query\": query,\n        \"suggestion\": None,\n    }\n\n    if results.query.backend.include_spelling:\n        context[\"suggestion\"] = form.get_suggestion()\n\n    if extra_context:\n        context.update(extra_context)\n\n    return render(request, template, context)\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[tool.black]\nline_length=88\n\n[tool.isort]\nknown_first_party = [\"haystack\", \"test_haystack\"]\nprofile = \"black\"\nmulti_line_output = 3"
  },
  {
    "path": "setup.cfg",
    "content": "[pep8]\nline_length=88\nexclude=docs\n\n[flake8]\nline_length=88\nexclude=docs,tests\nignore=E203, E501, W503, D\n\n[options]\nsetup_requires =\n  setuptools_scm\n"
  },
  {
    "path": "setup.py",
    "content": "#!/usr/bin/env python\nfrom setuptools import setup\n\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from ez_setup import use_setuptools\n\n    use_setuptools()\n    from setuptools import setup\n\ninstall_requires = [\"Django>=2.2\"]\n\ntests_require = [\n    \"pysolr>=3.7.0\",\n    \"whoosh>=2.5.4,<3.0\",\n    \"python-dateutil\",\n    \"geopy==2.0.0\",\n    \"nose\",\n    \"coverage\",\n    \"requests\",\n]\n\nsetup(\n    name=\"django-haystack\",\n    use_scm_version=True,\n    description=\"Pluggable search for Django.\",\n    author=\"Daniel Lindsley\",\n    author_email=\"daniel@toastdriven.com\",\n    long_description=open(\"README.rst\", \"r\").read(),\n    url=\"http://haystacksearch.org/\",\n    packages=[\n        \"haystack\",\n        \"haystack.backends\",\n        \"haystack.management\",\n        \"haystack.management.commands\",\n        \"haystack.templatetags\",\n        \"haystack.utils\",\n    ],\n    package_data={\n        \"haystack\": [\"templates/panels/*\", \"templates/search_configuration/*\"]\n    },\n    classifiers=[\n        \"Development Status :: 5 - Production/Stable\",\n        \"Environment :: Web Environment\",\n        \"Framework :: Django\",\n        \"Framework :: Django :: 2.2\",\n        \"Framework :: Django :: 3.0\",\n        \"Intended Audience :: Developers\",\n        \"License :: OSI Approved :: BSD License\",\n        \"Operating System :: OS Independent\",\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.5\",\n        \"Programming Language :: Python :: 3.6\",\n        \"Programming Language :: Python :: 3.7\",\n        \"Programming Language :: Python :: 3.8\",\n        \"Topic :: Utilities\",\n    ],\n    zip_safe=False,\n    install_requires=install_requires,\n    tests_require=tests_require,\n    extras_require={\n        \"elasticsearch\": [\"elasticsearch>=5,<6\"],\n    },\n    
test_suite=\"test_haystack.run_tests.run_all\",\n)\n"
  },
  {
    "path": "test_haystack/__init__.py",
    "content": "import os\n\ntest_runner = None\nold_config = None\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"test_haystack.settings\"\n\n\nimport django\n\ndjango.setup()\n\n\ndef setup():\n    global test_runner\n    global old_config\n\n    from django.test.runner import DiscoverRunner\n\n    test_runner = DiscoverRunner()\n    test_runner.setup_test_environment()\n    old_config = test_runner.setup_databases()\n\n\ndef teardown():\n    test_runner.teardown_databases(old_config)\n    test_runner.teardown_test_environment()\n"
  },
  {
    "path": "test_haystack/core/__init__.py",
    "content": ""
  },
  {
    "path": "test_haystack/core/admin.py",
    "content": "from django.contrib import admin\n\nfrom haystack.admin import SearchModelAdmin\n\nfrom .models import MockModel\n\n\nclass MockModelAdmin(SearchModelAdmin):\n    haystack_connection = \"solr\"\n    date_hierarchy = \"pub_date\"\n    list_display = (\"author\", \"pub_date\")\n\n\nadmin.site.register(MockModel, MockModelAdmin)\n"
  },
  {
    "path": "test_haystack/core/custom_identifier.py",
    "content": "import hashlib\n\n\ndef get_identifier_method(key):\n    \"\"\"\n    Custom get_identifier method used for testing the\n    setting HAYSTACK_IDENTIFIER_MODULE\n    \"\"\"\n\n    if hasattr(key, \"get_custom_haystack_id\"):\n        return key.get_custom_haystack_id()\n    else:\n        key_bytes = key.encode(\"utf-8\")\n        return hashlib.md5(key_bytes).hexdigest()\n"
  },
  {
    "path": "test_haystack/core/fixtures/base_data.json",
    "content": "[\n  {\n    \"pk\": 1,\n    \"model\": \"core.mocktag\",\n    \"fields\": {\n      \"name\": \"primary\"\n    }\n  },\n  {\n    \"pk\": 2,\n    \"model\": \"core.mocktag\",\n    \"fields\": {\n      \"name\": \"secondary\"\n    }\n  },\n  {\n    \"pk\": 1,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel1\",\n      \"foo\": \"bar\",\n      \"pub_date\": \"2009-03-17 06:00:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 2,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel2\",\n      \"foo\": \"bar\",\n      \"pub_date\": \"2009-03-17 07:00:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 3,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"bar\",\n      \"pub_date\": \"2009-03-17 08:00:00\",\n      \"tag\": 2\n    }\n  },\n  {\n    \"pk\": \"sometext\",\n    \"model\": \"core.charpkmockmodel\",\n    \"fields\": {\n    }\n  },\n  {\n    \"pk\": \"1234\",\n    \"model\": \"core.charpkmockmodel\",\n    \"fields\": {\n    }\n  },\n  {\n    \"pk\": 1,\n    \"model\": \"core.afifthmockmodel\",\n    \"fields\": {\n      \"author\": \"sam1\",\n      \"deleted\": false\n    }\n  },\n  {\n    \"pk\": 2,\n    \"model\": \"core.afifthmockmodel\",\n    \"fields\": {\n      \"author\": \"sam2\",\n      \"deleted\": true\n    }\n  },\n  {\n    \"pk\": \"53554c58-7051-4350-bcc9-dad75eb248a9\",\n    \"model\": \"core.uuidmockmodel\",\n    \"fields\": {\n      \"characteristics\": \"some text that was indexed\"\n    }\n  },\n  {\n    \"pk\": \"77554c58-7051-4350-bcc9-dad75eb24888\",\n    \"model\": \"core.uuidmockmodel\",\n    \"fields\": {\n      \"characteristics\": \"more text that was indexed\"\n    }\n  }\n]\n"
  },
  {
    "path": "test_haystack/core/fixtures/bulk_data.json",
    "content": "[\n  {\n    \"pk\": 1,\n    \"model\": \"core.mocktag\",\n    \"fields\": {\n      \"name\": \"search_test\"\n    }\n  },\n  {\n    \"pk\": 1,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel1\",\n      \"foo\": \"Registering indexes in Haystack is very similar to registering models and ``ModelAdmin`` classes in the `Django admin site`_.  If you want to override the default indexing behavior for your model you can specify your own ``SearchIndex`` class.  This is useful for ensuring that future-dated or non-live content is not indexed and searchable. Our ``Note`` model has a ``pub_date`` field, so let's update our code to include our own ``SearchIndex`` to exclude indexing future-dated notes:\",\n      \"pub_date\": \"2009-06-18 06:00:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 2,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel2\",\n      \"foo\": \"In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.\",\n      \"pub_date\": \"2009-07-17 00:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 3,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"Every custom ``SearchIndex`` requires there be one and only one field with ``document=True``.  This is the primary field that will get passed to the backend for indexing. For this field, you'll then need to create a template at  ``search/indexes/myapp/note_text.txt``. This allows you to customize the document  that will be passed to the search backend for indexing. 
A sample template might look like:\",\n      \"pub_date\": \"2009-06-18 08:00:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 4,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"The exception to this is the TemplateField class. This take either no arguments or an explicit template name to populate their contents. You can find more information about them in the SearchIndex API reference.\",\n      \"pub_date\": \"2009-07-17 01:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 5,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel1\",\n      \"foo\": \"This will pull in the default URLconf for Haystack. It consists of a single URLconf that points to a SearchView instance. You can change this class’s behavior by passing it any of several keyword arguments or override it entirely with your own view.\",\n      \"pub_date\": \"2009-07-17 02:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 6,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel1\",\n      \"foo\": \"This will create a default SearchIndex instance, search through all of your INSTALLED_APPS for search_indexes.py and register all SearchIndexes with the default SearchIndex. If autodiscovery and inclusion of all indexes is not desirable, you can manually register models in the following manner:\",\n      \"pub_date\": \"2009-07-17 03:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 7,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel1\",\n      \"foo\": \"The SearchBackend class handles interaction directly with the backend. The search query it performs is usually fed to it from a SearchQuery class that has been built for that backend. 
This class must be at least partially implemented on a per-backend basis and is usually accompanied by a SearchQuery class within the same module.\",\n      \"pub_date\": \"2009-07-17 04:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 8,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel2\",\n      \"foo\": \"Takes a query to search on and returns dictionary. The query should be a string that is appropriate syntax for the backend. The returned dictionary should contain the keys ‘results’ and ‘hits’. The ‘results’ value should be an iterable of populated SearchResult objects. The ‘hits’ should be an integer count of the number of matched results the search backend found. This method MUST be implemented by each backend, as it will be highly specific to each one.\",\n      \"pub_date\": \"2009-07-17 05:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 9,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel1\",\n      \"foo\": \"The SearchQuery class acts as an intermediary between SearchQuerySet‘s abstraction and SearchBackend‘s actual search. Given the metadata provided by SearchQuerySet, SearchQuery build the actual query and interacts with the SearchBackend on SearchQuerySet‘s behalf. This class must be at least partially implemented on a per-backend basis, as portions are highly specific to the backend. It usually is bundled with the accompanying SearchBackend.\",\n      \"pub_date\": \"2009-07-17 06:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 10,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"Most people will NOT have to use this class directly. SearchQuerySet handles all interactions with SearchQuery objects and provides a nicer interface to work with. Should you need advanced/custom behavior, you can supply your version of SearchQuery that overrides/extends the class in the manner you see fit. 
SearchQuerySet objects take a kwarg parameter query where you can pass in your class.\",\n      \"pub_date\": \"2009-07-17 07:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 11,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel1\",\n      \"foo\": \"The SearchQuery object maintains a list of QueryFilter objects. Each filter object supports what field it looks up against, what kind of lookup (i.e. the __’s), what value it’s looking for and if it’s a AND/OR/NOT. The SearchQuery object’s “build_query” method should then iterate over that list and convert that to a valid query for the search backend.\",\n      \"pub_date\": \"2009-07-17 08:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 12,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel2\",\n      \"foo\": \"The SearchSite provides a way to collect the SearchIndexes that are relevant to the current site, much like ModelAdmins in the admin app. This allows you to register indexes on models you don’t control (reusable apps, django.contrib, etc.) as well as customize on a per-site basis what indexes should be available (different indexes for different sites, same codebase).\",\n      \"pub_date\": \"2009-07-17 09:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 13,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"If you need to narrow the indexes that get registered, you will need to manipulate a SearchSite. There are two ways to go about this, via either register or unregister. 
If you want most of the indexes but want to forgo a specific one(s), you can setup the main site via autodiscover then simply unregister the one(s) you don’t want.:\",\n      \"pub_date\": \"2009-07-17 10:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 14,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel2\",\n      \"foo\": \"The SearchIndex class allows the application developer a way to provide data to the backend in a structured format. Developers familiar with Django’s Form or Model classes should find the syntax for indexes familiar. This class is arguably the most important part of integrating Haystack into your application, as it has a large impact on the quality of the search results and how easy it is for users to find what they’re looking for. Care and effort should be put into making your indexes the best they can be.\",\n      \"pub_date\": \"2009-07-17 11:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 15,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel2\",\n      \"foo\": \"Unlike relational databases, most search engines supported by Haystack are primarily document-based. They focus on a single text blob which they tokenize, analyze and index. When searching, this field is usually the primary one that is searched. Further, the schema used by most engines is the same for all types of data added, unlike a relational database that has a table schema for each chunk of data. It may be helpful to think of your search index as something closer to a key-value store instead of imagining it in terms of a RDBMS.\",\n      \"pub_date\": \"2009-07-17 12:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 16,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"Common uses include storing pertinent data information, categorizations of the document, author information and related data. 
By adding fields for these pieces of data, you provide a means to further narrow/filter search terms. This can be useful from either a UI perspective (a better advanced search form) or from a developer standpoint (section-dependent search, off-loading certain tasks to search, et cetera).\",\n      \"pub_date\": \"2009-07-17 13:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 17,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"Most search engines that were candidates for inclusion in Haystack all had a central concept of a document that they indexed. These documents form a corpus within which to primarily search. Because this ideal is so central and most of Haystack is designed to have pluggable backends, it is important to ensure that all engines have at least a bare minimum of the data they need to function.\",\n      \"pub_date\": \"2009-07-17 14:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 18,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel1\",\n      \"foo\": \"As a result, when creating a SearchIndex, at least one field must be marked with document=True. This signifies to Haystack that whatever is placed in this field while indexing is to be the primary text the search engine indexes. The name of this field can be almost anything, but text is one of the more common names used.\",\n      \"pub_date\": \"2009-07-17 15:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 19,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"One shortcoming of the use of search is that you rarely have all or the most up-to-date information about an object in the index. As a result, when retrieving search results, you will likely have to access the object in the database to provide better information. However, this can also hit the database quite heavily (think .get(pk=result.id) per object). 
If your search is popular, this can lead to a big performance hit. There are two ways to prevent this. The first way is SearchQuerySet.load_all, which tries to group all similar objects and pull them though one query instead of many. This still hits the DB and incurs a performance penalty.\",\n      \"pub_date\": \"2009-07-17 16:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 20,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel2\",\n      \"foo\": \"The other option is to leverage stored fields. By default, all fields in Haystack are both indexed (searchable by the engine) and stored (retained by the engine and presented in the results). By using a stored field, you can store commonly used data in such a way that you don’t need to hit the database when processing the search result to get more information. By the way: Jenny's number is 867-5309\",\n      \"pub_date\": \"2009-07-17 17:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 21,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel2\",\n      \"foo\": \"For example, one great way to leverage this is to pre-rendering an object’s search result template DURING indexing. You define an additional field, render a template with it and it follows the main indexed record into the index. Then, when that record is pulled when it matches a query, you can simply display the contents of that field, which avoids the database hit.:\",\n      \"pub_date\": \"2009-07-17 18:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 22,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"However, sometimes, even more control over what gets placed in your index is needed. To facilitate this, SearchIndex objects have a ‘preparation’ stage that populates data just before it is indexed. You can hook into this phase in several ways. 
This should be very familiar to developers who have used Django’s forms before as it loosely follows similar concepts, though the emphasis here is less on cleansing data from user input and more on making the data friendly to the search backend.\",\n      \"pub_date\": \"2009-07-17 19:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 23,\n    \"model\": \"core.mockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"foo\": \"Each SearchIndex gets a prepare method, which handles collecting all the data. This method should return a dictionary that will be the final data used by the search backend. Overriding this method is useful if you need to collect more than one piece of data or need to incorporate additional data that is not well represented by a single SearchField. An example might look like:\",\n      \"pub_date\": \"2009-07-17 20:30:00\",\n      \"tag\": 1\n    }\n  },\n  {\n    \"pk\": 1,\n    \"model\": \"core.anothermockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"pub_date\": \"2009-07-17 21:30:00\"\n    }\n  },\n  {\n    \"pk\": 2,\n    \"model\": \"core.anothermockmodel\",\n    \"fields\": {\n      \"author\": \"daniel3\",\n      \"pub_date\": \"2009-07-17 22:30:00\"\n    }\n  },\n  {\n    \"pk\": 1,\n    \"model\": \"core.ScoreMockModel\",\n    \"fields\": {\n      \"score\": \"42\"\n    }\n  }\n]\n"
  },
  {
    "path": "test_haystack/core/models.py",
    "content": "# A couple models for Haystack to test with.\nimport datetime\nimport uuid\n\nfrom django.db import models\n\n\nclass MockTag(models.Model):\n    name = models.CharField(max_length=32)\n\n    def __str__(self):\n        return self.name\n\n\nclass MockModel(models.Model):\n    author = models.CharField(max_length=255)\n    foo = models.CharField(max_length=255, blank=True)\n    pub_date = models.DateTimeField(default=datetime.datetime.now)\n    tag = models.ForeignKey(MockTag, models.CASCADE)\n\n    def __str__(self):\n        return self.author\n\n    def hello(self):\n        return \"World!\"\n\n\nclass UUIDMockModel(models.Model):\n    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n    characteristics = models.TextField()\n\n    def __str__(self):\n        return str(self.id)\n\n\nclass AnotherMockModel(models.Model):\n    author = models.CharField(max_length=255)\n    pub_date = models.DateTimeField(default=datetime.datetime.now)\n\n    def __str__(self):\n        return self.author\n\n\nclass AThirdMockModel(AnotherMockModel):\n    average_delay = models.FloatField(default=0.0)\n    view_count = models.PositiveIntegerField(default=0)\n\n\nclass CharPKMockModel(models.Model):\n    key = models.CharField(primary_key=True, max_length=10)\n\n\nclass AFourthMockModel(models.Model):\n    author = models.CharField(max_length=255)\n    editor = models.CharField(max_length=255)\n    pub_date = models.DateTimeField(default=datetime.datetime.now)\n\n    def __str__(self):\n        return self.author\n\n\nclass SoftDeleteManager(models.Manager):\n    def get_queryset(self):\n        return super().get_queryset().filter(deleted=False)\n\n    def complete_set(self):\n        return super().get_queryset()\n\n\nclass AFifthMockModel(models.Model):\n    author = models.CharField(max_length=255)\n    deleted = models.BooleanField(default=False)\n\n    objects = SoftDeleteManager()\n\n    def __str__(self):\n        return 
self.author\n\n\nclass ASixthMockModel(models.Model):\n    name = models.CharField(max_length=255)\n    lat = models.FloatField()\n    lon = models.FloatField()\n\n    def __str__(self):\n        return self.name\n\n\nclass ScoreMockModel(models.Model):\n    score = models.CharField(max_length=10)\n\n    def __str__(self):\n        return self.score\n\n\nclass ManyToManyLeftSideModel(models.Model):\n    related_models = models.ManyToManyField(\"ManyToManyRightSideModel\")\n\n\nclass ManyToManyRightSideModel(models.Model):\n    name = models.CharField(max_length=32, default=\"Default name\")\n\n    def __str__(self):\n        return self.name\n\n\nclass OneToManyLeftSideModel(models.Model):\n    pass\n\n\nclass OneToManyRightSideModel(models.Model):\n    left_side = models.ForeignKey(\n        OneToManyLeftSideModel, models.CASCADE, related_name=\"right_side\"\n    )\n"
  },
  {
    "path": "test_haystack/core/templates/404.html",
    "content": "{% extends 'base.html' %}"
  },
  {
    "path": "test_haystack/core/templates/base.html",
    "content": ""
  },
  {
    "path": "test_haystack/core/templates/search/indexes/bar.txt",
    "content": "BAR!\n"
  },
  {
    "path": "test_haystack/core/templates/search/indexes/core/mockmodel_content.txt",
    "content": "Indexed!\n{{ object.pk }}"
  },
  {
    "path": "test_haystack/core/templates/search/indexes/core/mockmodel_extra.txt",
    "content": "Stored!\n{{ object.pk }}"
  },
  {
    "path": "test_haystack/core/templates/search/indexes/core/mockmodel_template.txt",
    "content": "Indexed!\n{{ object.pk }}"
  },
  {
    "path": "test_haystack/core/templates/search/indexes/core/mockmodel_text.txt",
    "content": "Indexed!\n{{ object.pk }}"
  },
  {
    "path": "test_haystack/core/templates/search/indexes/foo.txt",
    "content": "FOO!\n"
  },
  {
    "path": "test_haystack/core/templates/search/search.html",
    "content": "{% extends 'base.html' %}"
  },
  {
    "path": "test_haystack/core/templates/test_suggestion.html",
    "content": "Suggestion: {{ suggestion }}"
  },
  {
    "path": "test_haystack/core/urls.py",
    "content": "from django.contrib import admin\nfrom django.urls import include, path\n\nfrom haystack.forms import FacetedSearchForm\nfrom haystack.query import SearchQuerySet\nfrom haystack.views import FacetedSearchView, SearchView, basic_search\n\nadmin.autodiscover()\n\n\nurlpatterns = [\n    path(\"\", SearchView(load_all=False), name=\"haystack_search\"),\n    path(\"admin/\", admin.site.urls),\n    path(\"basic/\", basic_search, {\"load_all\": False}, name=\"haystack_basic_search\"),\n    path(\n        \"faceted/\",\n        FacetedSearchView(\n            searchqueryset=SearchQuerySet().facet(\"author\"),\n            form_class=FacetedSearchForm,\n        ),\n        name=\"haystack_faceted_search\",\n    ),\n]\n\nurlpatterns += [\n    path(\n        \"\",\n        include((\"test_haystack.test_app_without_models.urls\", \"app-without-models\")),\n    )\n]\n"
  },
  {
    "path": "test_haystack/discovery/__init__.py",
    "content": ""
  },
  {
    "path": "test_haystack/discovery/models.py",
    "content": "from django.db import models\n\n\nclass Foo(models.Model):\n    title = models.CharField(max_length=255)\n    body = models.TextField()\n\n    def __str__(self):\n        return self.title\n\n\nclass Bar(models.Model):\n    author = models.CharField(max_length=255)\n    content = models.TextField()\n\n    def __str__(self):\n        return self.author\n"
  },
  {
    "path": "test_haystack/discovery/search_indexes.py",
    "content": "from haystack import indexes\nfrom test_haystack.discovery.models import Bar, Foo\n\n\nclass FooIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, model_attr=\"body\")\n\n    def get_model(self):\n        return Foo\n\n\nclass BarIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n\n    def get_model(self):\n        return Bar\n"
  },
  {
    "path": "test_haystack/discovery/templates/search/indexes/bar_text.txt",
    "content": "{{ object.title }}\n{{ object.body }}"
  },
  {
    "path": "test_haystack/elasticsearch2_tests/__init__.py",
    "content": "import unittest\nimport warnings\n\nfrom django.conf import settings\n\nfrom haystack.utils import log as logging\n\nwarnings.simplefilter(\"ignore\", Warning)\n\n\ndef setup():\n    log = logging.getLogger(\"haystack\")\n    try:\n        import elasticsearch\n\n        if not ((2, 0, 0) <= elasticsearch.__version__ < (3, 0, 0)):\n            raise ImportError\n        from elasticsearch import Elasticsearch, exceptions\n    except ImportError:\n        log.error(\n            \"Skipping ElasticSearch 2 tests: 'elasticsearch>=2.0.0,<3.0.0' not installed.\"\n        )\n        raise unittest.SkipTest(\"'elasticsearch>=2.0.0,<3.0.0' not installed.\")\n\n    url = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n    es = Elasticsearch(url)\n    try:\n        es.info()\n    except exceptions.ConnectionError as e:\n        log.error(\"elasticsearch not running on %r\" % url, exc_info=True)\n        raise unittest.SkipTest(\"elasticsearch not running on %r\" % url, e)\n"
  },
  {
    "path": "test_haystack/elasticsearch2_tests/test_backend.py",
    "content": "import datetime\nimport logging as std_logging\nimport operator\nimport pickle\nimport unittest\nfrom decimal import Decimal\n\nimport elasticsearch\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom haystack import connections, indexes, reset_search_queries\nfrom haystack.exceptions import SkipDocument\nfrom haystack.inputs import AutoQuery\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, RelatedSearchQuerySet, SearchQuerySet\nfrom haystack.utils import log as logging\nfrom haystack.utils.loading import UnifiedIndex\n\nfrom ..core.models import AFourthMockModel, AnotherMockModel, ASixthMockModel, MockModel\nfrom ..mocks import MockSearchResult\n\n\ndef clear_elasticsearch_index():\n    # Wipe it clean.\n    raw_es = elasticsearch.Elasticsearch(\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n    )\n    try:\n        raw_es.indices.delete(\n            index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n        )\n        raw_es.indices.refresh()\n    except elasticsearch.TransportError:\n        pass\n\n    # Since we've just completely deleted the index, we'll reset setup_complete so the next access will\n    # correctly define the mappings:\n    connections[\"elasticsearch\"].get_backend().setup_complete = False\n\n\nclass Elasticsearch2MockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch2MockSearchIndexWithSkipDocument(Elasticsearch2MockSearchIndex):\n    def prepare_text(self, obj):\n        if obj.author == \"daniel3\":\n            raise SkipDocument\n        return \"Indexed!\\n%s\" % obj.id\n\n\nclass 
Elasticsearch2MockSpellingIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    name = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n    def prepare_text(self, obj):\n        return obj.foo\n\n\nclass Elasticsearch2MaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    month = indexes.CharField(indexed=False)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def prepare_month(self, obj):\n        return \"%02d\" % obj.pub_date.month\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch2MockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch2AnotherMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return AnotherMockModel\n\n    def prepare_text(self, obj):\n        return \"You might be searching for the user %s\" % obj.author\n\n\nclass Elasticsearch2BoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(\n        document=True,\n        use_template=True,\n        template_name=\"search/indexes/core/mockmodel_template.txt\",\n    )\n    author = indexes.CharField(model_attr=\"author\", weight=2.0)\n    editor = indexes.CharField(model_attr=\"editor\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return 
AFourthMockModel\n\n    def prepare(self, obj):\n        data = super().prepare(obj)\n\n        if obj.pk == 4:\n            data[\"boost\"] = 5.0\n\n        return data\n\n\nclass Elasticsearch2FacetingMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    author = indexes.CharField(model_attr=\"author\", faceted=True)\n    editor = indexes.CharField(model_attr=\"editor\", faceted=True)\n    pub_date = indexes.DateField(model_attr=\"pub_date\", faceted=True)\n    facet_field = indexes.FacetCharField(model_attr=\"author\")\n\n    def prepare_text(self, obj):\n        return \"%s %s\" % (obj.author, obj.editor)\n\n    def get_model(self):\n        return AFourthMockModel\n\n\nclass Elasticsearch2RoundTripSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField()\n    is_active = indexes.BooleanField()\n    post_count = indexes.IntegerField()\n    average_rating = indexes.FloatField()\n    price = indexes.DecimalField()\n    pub_date = indexes.DateField()\n    created = indexes.DateTimeField()\n    tags = indexes.MultiValueField()\n    sites = indexes.MultiValueField()\n\n    def get_model(self):\n        return MockModel\n\n    def prepare(self, obj):\n        prepped = super().prepare(obj)\n        prepped.update(\n            {\n                \"text\": \"This is some example text.\",\n                \"name\": \"Mister Pants\",\n                \"is_active\": True,\n                \"post_count\": 25,\n                \"average_rating\": 3.6,\n                \"price\": Decimal(\"24.99\"),\n                \"pub_date\": datetime.date(2009, 11, 21),\n                \"created\": datetime.datetime(2009, 11, 21, 21, 31, 00),\n                \"tags\": [\"staff\", \"outdoor\", \"activist\", \"scientist\"],\n                \"sites\": [3, 5, 1],\n            }\n        )\n        return prepped\n\n\nclass 
Elasticsearch2ComplexFacetsMockSearchIndex(\n    indexes.SearchIndex, indexes.Indexable\n):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField(faceted=True)\n    is_active = indexes.BooleanField(faceted=True)\n    post_count = indexes.IntegerField()\n    post_count_i = indexes.FacetIntegerField(facet_for=\"post_count\")\n    average_rating = indexes.FloatField(faceted=True)\n    pub_date = indexes.DateField(faceted=True)\n    created = indexes.DateTimeField(faceted=True)\n    sites = indexes.MultiValueField(faceted=True)\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch2AutocompleteMockModelSearchIndex(\n    indexes.SearchIndex, indexes.Indexable\n):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    text_auto = indexes.EdgeNgramField(model_attr=\"foo\")\n    name_auto = indexes.EdgeNgramField(model_attr=\"author\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch2SpatialSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"name\", document=True)\n    location = indexes.LocationField()\n\n    def prepare_location(self, obj):\n        return \"%s,%s\" % (obj.lat, obj.lon)\n\n    def get_model(self):\n        return ASixthMockModel\n\n\nclass TestSettings(TestCase):\n    def test_kwargs_are_passed_on(self):\n        from haystack.backends.elasticsearch_backend import ElasticsearchSearchBackend\n\n        backend = ElasticsearchSearchBackend(\n            \"alias\",\n            **{\n                \"URL\": settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"],\n                \"INDEX_NAME\": \"testing\",\n                \"KWARGS\": {\"max_retries\": 42},\n            }\n        )\n\n        self.assertEqual(backend.conn.transport.max_retries, 42)\n\n\nclass 
Elasticsearch2SearchBackendTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2MockSearchIndex()\n        self.smmidni = Elasticsearch2MockSearchIndexWithSkipDocument()\n        self.smtmmi = Elasticsearch2MaintainTypeMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        # Force the backend to rebuild the mapping each time.\n        self.sb.existing_mapping = {}\n        self.sb.setup()\n\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n        self.sb.silently_fail = True\n\n    def raw_search(self, query):\n        try:\n            return self.raw_es.search(\n                q=\"*:*\",\n                index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"],\n            )\n        except elasticsearch.TransportError:\n            return {}\n\n    def test_non_silent(self):\n        bad_sb = connections[\"elasticsearch\"].backend(\n            \"bad\",\n            URL=\"http://omg.wtf.bbq:1000/\",\n            INDEX_NAME=\"whatver\",\n            SILENTLY_FAIL=False,\n            TIMEOUT=1,\n        )\n\n        try:\n            bad_sb.update(self.smmi, self.sample_objs)\n            
self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.remove(\"core.mockmodel.1\")\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.clear()\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.search(\"foo\")\n            self.fail()\n        except:\n            pass\n\n    def test_update_no_documents(self):\n        url = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        index_name = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n\n        sb = connections[\"elasticsearch\"].backend(\n            \"elasticsearch\", URL=url, INDEX_NAME=index_name, SILENTLY_FAIL=True\n        )\n        self.assertEqual(sb.update(self.smmi, []), None)\n\n        sb = connections[\"elasticsearch\"].backend(\n            \"elasticsearch\", URL=url, INDEX_NAME=index_name, SILENTLY_FAIL=False\n        )\n        try:\n            sb.update(self.smmi, [])\n            self.fail()\n        except:\n            pass\n\n    def test_update(self):\n        self.sb.update(self.smmi, self.sample_objs)\n\n        # Check what Elasticsearch thinks is there.\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n        self.assertEqual(\n            sorted(\n                [res[\"_source\"] for res in self.raw_search(\"*:*\")[\"hits\"][\"hits\"]],\n                key=lambda x: x[\"id\"],\n            ),\n            [\n                {\n                    \"django_id\": \"1\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel1\",\n                    \"name_exact\": \"daniel1\",\n                    \"text\": \"Indexed!\\n1\",\n                    \"pub_date\": \"2009-02-24T00:00:00\",\n                    \"id\": \"core.mockmodel.1\",\n                },\n                {\n                    \"django_id\": \"2\",\n                    \"django_ct\": 
\"core.mockmodel\",\n                    \"name\": \"daniel2\",\n                    \"name_exact\": \"daniel2\",\n                    \"text\": \"Indexed!\\n2\",\n                    \"pub_date\": \"2009-02-23T00:00:00\",\n                    \"id\": \"core.mockmodel.2\",\n                },\n                {\n                    \"django_id\": \"3\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel3\",\n                    \"name_exact\": \"daniel3\",\n                    \"text\": \"Indexed!\\n3\",\n                    \"pub_date\": \"2009-02-22T00:00:00\",\n                    \"id\": \"core.mockmodel.3\",\n                },\n            ],\n        )\n\n    def test_update_with_SkipDocument_raised(self):\n        self.sb.update(self.smmidni, self.sample_objs)\n\n        # Check what Elasticsearch thinks is there.\n        res = self.raw_search(\"*:*\")[\"hits\"]\n        self.assertEqual(res[\"total\"], 2)\n        self.assertListEqual(\n            sorted([x[\"_source\"][\"id\"] for x in res[\"hits\"]]),\n            [\"core.mockmodel.1\", \"core.mockmodel.2\"],\n        )\n\n    def test_remove(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        self.sb.remove(self.sample_objs[0])\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 2)\n        self.assertEqual(\n            sorted(\n                [res[\"_source\"] for res in self.raw_search(\"*:*\")[\"hits\"][\"hits\"]],\n                key=operator.itemgetter(\"django_id\"),\n            ),\n            [\n                {\n                    \"django_id\": \"2\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel2\",\n                    \"name_exact\": \"daniel2\",\n                    \"text\": \"Indexed!\\n2\",\n                    \"pub_date\": \"2009-02-23T00:00:00\",\n                    
\"id\": \"core.mockmodel.2\",\n                },\n                {\n                    \"django_id\": \"3\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel3\",\n                    \"name_exact\": \"daniel3\",\n                    \"text\": \"Indexed!\\n3\",\n                    \"pub_date\": \"2009-02-22T00:00:00\",\n                    \"id\": \"core.mockmodel.3\",\n                },\n            ],\n        )\n\n    def test_remove_succeeds_on_404(self):\n        self.sb.silently_fail = False\n        self.sb.remove(\"core.mockmodel.421\")\n\n    def test_clear(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear()\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([AnotherMockModel])\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([MockModel])\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([AnotherMockModel, MockModel])\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n    def test_search(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            set([result.pk for result in 
self.sb.search(\"*:*\")[\"results\"]]),\n            {\"2\", \"1\", \"3\"},\n        )\n\n        self.assertEqual(self.sb.search(\"\", highlight=True), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"Index\", highlight=True)[\"hits\"], 3)\n        self.assertEqual(\n            sorted(\n                [\n                    result.highlighted[0]\n                    for result in self.sb.search(\"Index\", highlight=True)[\"results\"]\n                ]\n            ),\n            [\"<em>Indexed</em>!\\n1\", \"<em>Indexed</em>!\\n2\", \"<em>Indexed</em>!\\n3\"],\n        )\n\n        self.assertEqual(self.sb.search(\"Indx\")[\"hits\"], 0)\n        self.assertEqual(self.sb.search(\"indaxed\")[\"spelling_suggestion\"], \"indexed\")\n        self.assertEqual(\n            self.sb.search(\"arf\", spelling_query=\"indexyd\")[\"spelling_suggestion\"],\n            \"indexed\",\n        )\n\n        self.assertEqual(\n            self.sb.search(\"\", facets={\"name\": {}}), {\"hits\": 0, \"results\": []}\n        )\n        results = self.sb.search(\"Index\", facets={\"name\": {}})\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertSetEqual(\n            set(results[\"facets\"][\"fields\"][\"name\"]),\n            {(\"daniel3\", 1), (\"daniel2\", 1), (\"daniel1\", 1)},\n        )\n\n        self.assertEqual(\n            self.sb.search(\n                \"\",\n                date_facets={\n                    \"pub_date\": {\n                        \"start_date\": datetime.date(2008, 1, 1),\n                        \"end_date\": datetime.date(2009, 4, 1),\n                        \"gap_by\": \"month\",\n                        \"gap_amount\": 1,\n                    }\n                },\n            ),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\n            \"Index\",\n            date_facets={\n                \"pub_date\": {\n                    \"start_date\": 
datetime.date(2008, 1, 1),\n                    \"end_date\": datetime.date(2009, 4, 1),\n                    \"gap_by\": \"month\",\n                    \"gap_amount\": 1,\n                }\n            },\n        )\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(\n            results[\"facets\"][\"dates\"][\"pub_date\"],\n            [(datetime.datetime(2009, 2, 1, 0, 0), 3)],\n        )\n\n        self.assertEqual(\n            self.sb.search(\"\", query_facets=[(\"name\", \"[* TO e]\")]),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\"Index\", query_facets=[(\"name\", \"[* TO e]\")])\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(results[\"facets\"][\"queries\"], {\"name\": 3})\n\n        self.assertEqual(\n            self.sb.search(\"\", narrow_queries={\"name:daniel1\"}),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\"Index\", narrow_queries={\"name:daniel1\"})\n        self.assertEqual(results[\"hits\"], 1)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                self.sb.search(\"index\", result_class=MockSearchResult)[\"results\"][0],\n                MockSearchResult,\n            )\n        )\n\n        # Check the use of ``limit_to_registered_models``.\n        self.assertEqual(\n            self.sb.search(\"\", limit_to_registered_models=False),\n            {\"hits\": 0, \"results\": []},\n        )\n        self.assertEqual(\n            self.sb.search(\"*:*\", limit_to_registered_models=False)[\"hits\"], 3\n        )\n        self.assertEqual(\n            sorted(\n                [\n                    result.pk\n                    for result in self.sb.search(\n                        \"*:*\", limit_to_registered_models=False\n                    )[\"results\"]\n                ]\n            ),\n            [\"1\", \"2\", \"3\"],\n 
       )\n\n        # Stow.\n        old_limit_to_registered_models = getattr(\n            settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n        )\n        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            sorted([result.pk for result in self.sb.search(\"*:*\")[\"results\"]]),\n            [\"1\", \"2\", \"3\"],\n        )\n\n        # Restore.\n        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models\n\n    def test_spatial_search_parameters(self):\n        from django.contrib.gis.geos import Point\n\n        p1 = Point(1.23, 4.56)\n        kwargs = self.sb.build_search_kwargs(\n            \"*:*\",\n            distance_point={\"field\": \"location\", \"point\": p1},\n            sort_by=((\"distance\", \"desc\"),),\n        )\n\n        self.assertIn(\"sort\", kwargs)\n        self.assertEqual(1, len(kwargs[\"sort\"]))\n        geo_d = kwargs[\"sort\"][0][\"_geo_distance\"]\n\n        # ElasticSearch supports the GeoJSON-style lng, lat pairs so unlike Solr the values should be\n        # in the same order as we used to create the Point():\n        # http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-filter.html#_lat_lon_as_array_4\n\n        self.assertDictEqual(\n            geo_d, {\"location\": [1.23, 4.56], \"unit\": \"km\", \"order\": \"desc\"}\n        )\n\n    def test_more_like_this(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        # A functional MLT example with enough data to work is below. 
Rely on\n        # this to ensure the API is correct enough.\n        self.assertEqual(self.sb.more_like_this(self.sample_objs[0])[\"hits\"], 0)\n        self.assertEqual(\n            [\n                result.pk\n                for result in self.sb.more_like_this(self.sample_objs[0])[\"results\"]\n            ],\n            [],\n        )\n\n    def test_build_schema(self):\n        old_ui = connections[\"elasticsearch\"].get_unified_index()\n\n        (content_field_name, mapping) = self.sb.build_schema(old_ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n        self.assertEqual(len(mapping), 4 + 2)  # +2 management fields\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"name_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n            },\n        )\n\n        ui = UnifiedIndex()\n        ui.build(indexes=[Elasticsearch2ComplexFacetsMockSearchIndex()])\n        (content_field_name, mapping) = self.sb.build_schema(ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n        self.assertEqual(len(mapping), 15 + 2)  # +2 management fields\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": 
False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"is_active_exact\": {\"type\": \"boolean\"},\n                \"created\": {\"type\": \"date\"},\n                \"post_count\": {\"type\": \"long\"},\n                \"created_exact\": {\"type\": \"date\"},\n                \"sites_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n                \"is_active\": {\"type\": \"boolean\"},\n                \"sites\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"post_count_i\": {\"type\": \"long\"},\n                \"average_rating\": {\"type\": \"float\"},\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date_exact\": {\"type\": \"date\"},\n                \"name_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"average_rating_exact\": {\"type\": \"float\"},\n            },\n        )\n\n    def test_verify_type(self):\n        old_ui = connections[\"elasticsearch\"].get_unified_index()\n        ui = UnifiedIndex()\n        smtmmi = Elasticsearch2MaintainTypeMockSearchIndex()\n        ui.build(indexes=[smtmmi])\n        connections[\"elasticsearch\"]._index = ui\n        sb = connections[\"elasticsearch\"].get_backend()\n        sb.update(smtmmi, self.sample_objs)\n\n        self.assertEqual(sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            [result.month for result in sb.search(\"*:*\")[\"results\"]], [\"02\", \"02\", \"02\"]\n        )\n        connections[\"elasticsearch\"]._index = old_ui\n\n\nclass CaptureHandler(std_logging.Handler):\n    logs_seen = []\n\n    def emit(self, record):\n        
CaptureHandler.logs_seen.append(record)\n\n\nclass FailedElasticsearch2SearchBackendTestCase(TestCase):\n    def setUp(self):\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n        # Stow.\n        # Point the backend at a URL that doesn't exist so we can watch the\n        # sparks fly.\n        self.old_es_url = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"] = (\n            \"%s/foo/\" % self.old_es_url\n        )\n        self.cap = CaptureHandler()\n        logging.getLogger(\"haystack\").addHandler(self.cap)\n        config = apps.get_app_config(\"haystack\")\n        logging.getLogger(\"haystack\").removeHandler(config.stream)\n\n        # Setup the rest of the bits.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        ui = UnifiedIndex()\n        self.smmi = Elasticsearch2MockSearchIndex()\n        ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n    def tearDown(self):\n        # Restore.\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"] = self.old_es_url\n        connections[\"elasticsearch\"]._index = self.old_ui\n        config = apps.get_app_config(\"haystack\")\n        logging.getLogger(\"haystack\").removeHandler(self.cap)\n        logging.getLogger(\"haystack\").addHandler(config.stream)\n\n    @unittest.expectedFailure\n    def test_all_cases(self):\n        # Prior to the addition of the try/except bits, these would all fail miserably.\n        self.assertEqual(len(CaptureHandler.logs_seen), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        
self.assertEqual(len(CaptureHandler.logs_seen), 1)\n\n        self.sb.remove(self.sample_objs[0])\n        self.assertEqual(len(CaptureHandler.logs_seen), 2)\n\n        self.sb.search(\"search\")\n        self.assertEqual(len(CaptureHandler.logs_seen), 3)\n\n        self.sb.more_like_this(self.sample_objs[0])\n        self.assertEqual(len(CaptureHandler.logs_seen), 4)\n\n        self.sb.clear([MockModel])\n        self.assertEqual(len(CaptureHandler.logs_seen), 5)\n\n        self.sb.clear()\n        self.assertEqual(len(CaptureHandler.logs_seen), 6)\n\n\nclass LiveElasticsearch2SearchQueryTestCase(TestCase):\n    fixtures = [\"base_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2MockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sq = connections[\"elasticsearch\"].get_query()\n\n        # Force indexing of the content.\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_log_query(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        with self.settings(DEBUG=False):\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        with self.settings(DEBUG=True):\n            # Redefine it to clear out the cached results.\n            self.sq = connections[\"elasticsearch\"].query(using=\"elasticsearch\")\n            self.sq.add_filter(SQ(name=\"bar\"))\n            len(self.sq.get_results())\n            
self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n\n            # And again, for good measure.\n            self.sq = connections[\"elasticsearch\"].query(\"elasticsearch\")\n            self.sq.add_filter(SQ(name=\"bar\"))\n            self.sq.add_filter(SQ(text=\"moof\"))\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[1][\"query_string\"],\n                \"(name:(bar) AND text:(moof))\",\n            )\n\n\nlssqstc_all_loaded = None\n\n\n@override_settings(DEBUG=True)\nclass LiveElasticsearch2SearchQuerySetTestCase(TestCase):\n    \"\"\"Used to test actual implementation details of the SearchQuerySet.\"\"\"\n\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2MockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n        self.rsqs = RelatedSearchQuerySet(\"elasticsearch\")\n\n        # Ugly but not constantly reindexing saves us almost 50% runtime.\n        global lssqstc_all_loaded\n\n        if lssqstc_all_loaded is None:\n            lssqstc_all_loaded = True\n\n            # Wipe it clean.\n            clear_elasticsearch_index()\n\n            # Force indexing of the content.\n            self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        
connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_load_all(self):\n        sqs = self.sqs.order_by(\"pub_date\").load_all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(len(sqs) > 0)\n        self.assertEqual(\n            sqs[2].object.foo,\n            \"In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.\",\n        )\n\n    def test_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.sqs.all()\n        results = sorted([int(result.pk) for result in sqs])\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_slice(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            [int(result.pk) for result in results[1:11]],\n            [3, 2, 4, 5, 6, 7, 8, 9, 10, 11],\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all().order_by(\"pub_date\")\n        self.assertEqual(int(results[21].pk), 22)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_values_slicing(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        # 
TODO: this would be a good candidate for refactoring into a TestCase subclass shared across backends\n\n        # The values will come back as strings because Haystack doesn't assume PKs are integers.\n        # We'll prepare this set once since we're going to query the same results in multiple ways:\n        expected_pks = [str(i) for i in [3, 2, 4, 5, 6, 7, 8, 9, 10, 11]]\n\n        results = self.sqs.all().order_by(\"pub_date\").values(\"pk\")\n        self.assertListEqual([i[\"pk\"] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\")\n        self.assertListEqual([i[0] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\", flat=True)\n        self.assertListEqual(results[1:11], expected_pks)\n\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_count(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.sqs.all()\n        self.assertEqual(sqs.count(), 23)\n        self.assertEqual(sqs.count(), 23)\n        self.assertEqual(len(sqs), 23)\n        self.assertEqual(sqs.count(), 23)\n        # Should only execute one query to count the length of the result set.\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_manual_iter(self):\n        results = self.sqs.all()\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = set([int(result.pk) for result in results._manual_iter()])\n        self.assertEqual(\n            results,\n            {\n                2,\n                7,\n                12,\n                17,\n                1,\n                6,\n                11,\n                16,\n                23,\n                5,\n                10,\n                15,\n                22,\n              
  4,\n                9,\n                14,\n                19,\n                21,\n                3,\n                8,\n                13,\n                18,\n                20,\n            },\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_fill_cache(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all()\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 10\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n        results._fill_cache(10, 20)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 20\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n\n    def test_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        self.assertEqual(self.sqs._cache_is_full(), False)\n        results = self.sqs.all()\n        fire_the_iterator_and_fill_cache = [result for result in results]\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test___and__(self):\n        sqs1 = self.sqs.filter(content=\"foo\")\n        sqs2 = self.sqs.filter(content=\"bar\")\n        sqs = sqs1 & sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(sqs.query.build_query(), \"((foo) AND (bar))\")\n\n        # Now for something more complex...\n        sqs3 = self.sqs.exclude(title=\"moof\").filter(\n            
SQ(content=\"foo\") | SQ(content=\"baz\")\n        )\n        sqs4 = self.sqs.filter(content=\"bar\")\n        sqs = sqs3 & sqs4\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 3)\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"(NOT (title:(moof)) AND ((foo) OR (baz)) AND (bar))\",\n        )\n\n    def test___or__(self):\n        sqs1 = self.sqs.filter(content=\"foo\")\n        sqs2 = self.sqs.filter(content=\"bar\")\n        sqs = sqs1 | sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(sqs.query.build_query(), \"((foo) OR (bar))\")\n\n        # Now for something more complex...\n        sqs3 = self.sqs.exclude(title=\"moof\").filter(\n            SQ(content=\"foo\") | SQ(content=\"baz\")\n        )\n        sqs4 = self.sqs.filter(content=\"bar\").models(MockModel)\n        sqs = sqs3 | sqs4\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"((NOT (title:(moof)) AND ((foo) OR (baz))) OR (bar))\",\n        )\n\n    def test_auto_query(self):\n        # Ensure bits in exact matches get escaped properly as well.\n        # This will break horrifically if escaping isn't working.\n        sqs = self.sqs.auto_query('\"pants:rule\"')\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter), '<SQ: AND content__content=\"pants:rule\">'\n        )\n        self.assertEqual(sqs.query.build_query(), '(\"pants\\\\:rule\")')\n        self.assertEqual(len(sqs), 0)\n\n    # Regressions\n\n    def test_regression_proper_start_offsets(self):\n        sqs = self.sqs.filter(text=\"index\")\n        self.assertNotEqual(sqs.count(), 0)\n\n        id_counts = {}\n\n        for 
item in sqs:\n            if item.id in id_counts:\n                id_counts[item.id] += 1\n            else:\n                id_counts[item.id] = 1\n\n        for key, value in id_counts.items():\n            if value > 1:\n                self.fail(\n                    \"Result with id '%s' seen more than once in the results.\" % key\n                )\n\n    def test_regression_raw_search_breaks_slicing(self):\n        sqs = self.sqs.raw_search(\"text:index\")\n        page_1 = [result.pk for result in sqs[0:10]]\n        page_2 = [result.pk for result in sqs[10:20]]\n\n        for pk in page_2:\n            if pk in page_1:\n                self.fail(\n                    \"Result with id '%s' seen more than once in the results.\" % pk\n                )\n\n    # RelatedSearchQuerySet Tests\n\n    def test_related_load_all(self):\n        sqs = self.rsqs.order_by(\"pub_date\").load_all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(len(sqs) > 0)\n        self.assertEqual(\n            sqs[2].object.foo,\n            \"In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. 
You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.\",\n        )\n\n    def test_related_load_all_queryset(self):\n        sqs = self.rsqs.load_all().order_by(\"pub_date\")\n        self.assertEqual(len(sqs._load_all_querysets), 0)\n\n        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=1))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs._load_all_querysets), 1)\n        self.assertEqual(sorted([obj.object.id for obj in sqs]), list(range(2, 24)))\n\n        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=10))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs._load_all_querysets), 1)\n        self.assertEqual(\n            set([obj.object.id for obj in sqs]),\n            {12, 17, 11, 16, 23, 15, 22, 14, 19, 21, 13, 18, 20},\n        )\n        self.assertEqual(set([obj.object.id for obj in sqs[10:20]]), {21, 22, 23})\n\n    def test_related_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.rsqs.all()\n        results = set([int(result.pk) for result in sqs])\n        self.assertEqual(\n            results,\n            {\n                2,\n                7,\n                12,\n                17,\n                1,\n                6,\n                11,\n                16,\n                23,\n                5,\n                10,\n                15,\n                22,\n                4,\n                9,\n                14,\n                19,\n                21,\n                3,\n                8,\n                13,\n                18,\n                20,\n            },\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_related_slice(self):\n        reset_search_queries()\n        
self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            [int(result.pk) for result in results[1:11]],\n            [3, 2, 4, 5, 6, 7, 8, 9, 10, 11],\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(int(results[21].pk), 22)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            set([int(result.pk) for result in results[20:30]]), {21, 22, 23}\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_related_manual_iter(self):\n        results = self.rsqs.all()\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = sorted([int(result.pk) for result in results._manual_iter()])\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_related_fill_cache(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all()\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 10\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n        results._fill_cache(10, 20)\n        
self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 20\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n\n    def test_related_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        self.assertEqual(self.rsqs._cache_is_full(), False)\n        results = self.rsqs.all()\n        fire_the_iterator_and_fill_cache = [result for result in results]\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_quotes_regression(self):\n        sqs = self.sqs.auto_query(\"44°48'40''N 20°28'32''E\")\n        # Should not have empty terms.\n        self.assertEqual(sqs.query.build_query(), \"(44\\xb048'40''N 20\\xb028'32''E)\")\n        # Should not cause Elasticsearch to 500.\n        self.assertEqual(sqs.count(), 0)\n\n        sqs = self.sqs.auto_query(\"blazing\")\n        self.assertEqual(sqs.query.build_query(), \"(blazing)\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"blazing saddles\")\n        self.assertEqual(sqs.query.build_query(), \"(blazing saddles)\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles')\n        self.assertEqual(sqs.query.build_query(), '(\\\\\"blazing saddles)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing \\'saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing \\'saddles\")')\n        
self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\")\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"'\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\" ')\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"'\\\"\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\" '\\\\\\\")\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\" mel')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\" mel)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\" mel brooks')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\" mel brooks)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\" brooks')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\" brooks)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\" \"brooks')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\" \\\\\"brooks)')\n        self.assertEqual(sqs.count(), 0)\n\n    def test_query_generation(self):\n        sqs = self.sqs.filter(\n            SQ(content=AutoQuery(\"hello world\")) | SQ(title=AutoQuery(\"hello world\"))\n        )\n        self.assertEqual(\n            sqs.query.build_query(), \"((hello world) OR title:(hello world))\"\n        )\n\n    def test_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        sqs = self.sqs.all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n        # Custom class.\n        sqs = 
self.sqs.result_class(MockSearchResult).all()\n        self.assertTrue(isinstance(sqs[0], MockSearchResult))\n\n        # Reset to default.\n        sqs = self.sqs.result_class(None).all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n\n@override_settings(DEBUG=True)\nclass LiveElasticsearch2SpellingTestCase(TestCase):\n    \"\"\"Used to test actual implementation details of the SearchQuerySet.\"\"\"\n\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2MockSpellingIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Reboot the schema.\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sb.setup()\n\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_spelling(self):\n        self.assertEqual(\n            self.sqs.auto_query(\"structurd\").spelling_suggestion(), \"structured\"\n        )\n        self.assertEqual(self.sqs.spelling_suggestion(\"structurd\"), \"structured\")\n        self.assertEqual(\n            self.sqs.auto_query(\"srchindex instanc\").spelling_suggestion(),\n            \"searchindex instance\",\n        )\n        self.assertEqual(\n            self.sqs.spelling_suggestion(\"srchindex instanc\"), \"searchindex instance\"\n        )\n\n\nclass LiveElasticsearch2MoreLikeThisTestCase(TestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        self.old_ui = 
connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2MockModelSearchIndex()\n        self.sammi = Elasticsearch2AnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.sammi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        self.smmi.update(using=\"elasticsearch\")\n        self.sammi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_more_like_this(self):\n        mlt = self.sqs.more_like_this(MockModel.objects.get(pk=1))\n        results = [result.pk for result in mlt]\n        self.assertEqual(mlt.count(), 11)\n        self.assertEqual(\n            set(results), {\"10\", \"5\", \"2\", \"21\", \"4\", \"6\", \"23\", \"9\", \"14\"}\n        )\n        self.assertEqual(len(results), 10)\n\n        alt_mlt = self.sqs.filter(name=\"daniel3\").more_like_this(\n            MockModel.objects.get(pk=2)\n        )\n        results = [result.pk for result in alt_mlt]\n        self.assertEqual(alt_mlt.count(), 9)\n        self.assertEqual(\n            set(results), {\"2\", \"16\", \"3\", \"19\", \"4\", \"17\", \"10\", \"22\", \"23\"}\n        )\n        self.assertEqual(len(results), 9)\n\n        alt_mlt_with_models = self.sqs.models(MockModel).more_like_this(\n            MockModel.objects.get(pk=1)\n        )\n        results = [result.pk for result in alt_mlt_with_models]\n        self.assertEqual(alt_mlt_with_models.count(), 10)\n        self.assertEqual(\n            set(results), {\"10\", \"5\", \"21\", \"2\", \"4\", \"6\", \"23\", \"9\", \"14\", \"16\"}\n        )\n        self.assertEqual(len(results), 10)\n\n        if hasattr(MockModel.objects, \"defer\"):\n            # Make sure MLT works with deferred bits.\n            qs = MockModel.objects.defer(\"foo\")\n         
   self.assertEqual(qs.query.deferred_loading[1], True)\n            deferred = self.sqs.models(MockModel).more_like_this(qs.get(pk=1))\n            self.assertEqual(deferred.count(), 10)\n            self.assertEqual(\n                {result.pk for result in deferred},\n                {\"10\", \"5\", \"21\", \"2\", \"4\", \"6\", \"23\", \"9\", \"14\", \"16\"},\n            )\n            self.assertEqual(len([result.pk for result in deferred]), 10)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                self.sqs.result_class(MockSearchResult).more_like_this(\n                    MockModel.objects.get(pk=1)\n                )[0],\n                MockSearchResult,\n            )\n        )\n\n\nclass LiveElasticsearch2AutocompleteTestCase(TestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2AutocompleteMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Reboot the schema.\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sb.setup()\n\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_build_schema(self):\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        content_name, mapping = self.sb.build_schema(self.ui.all_searchfields())\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                  
  \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"name_auto\": {\"type\": \"string\", \"analyzer\": \"edgengram_analyzer\"},\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"text_auto\": {\"type\": \"string\", \"analyzer\": \"edgengram_analyzer\"},\n            },\n        )\n\n    def test_autocomplete(self):\n        autocomplete = self.sqs.autocomplete(text_auto=\"mod\")\n        self.assertEqual(autocomplete.count(), 16)\n        self.assertEqual(\n            set([result.pk for result in autocomplete]),\n            {\n                \"1\",\n                \"12\",\n                \"6\",\n                \"14\",\n                \"7\",\n                \"4\",\n                \"23\",\n                \"17\",\n                \"13\",\n                \"18\",\n                \"20\",\n                \"22\",\n                \"19\",\n                \"15\",\n                \"10\",\n                \"2\",\n            },\n        )\n        self.assertTrue(\"mod\" in autocomplete[0].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[1].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[2].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[3].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[4].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete]), 16)\n\n        # Test multiple words.\n        autocomplete_2 = self.sqs.autocomplete(text_auto=\"your mod\")\n        self.assertEqual(autocomplete_2.count(), 13)\n        self.assertEqual(\n            
set([result.pk for result in autocomplete_2]),\n            {\"1\", \"6\", \"2\", \"14\", \"12\", \"13\", \"10\", \"19\", \"4\", \"20\", \"23\", \"22\", \"15\"},\n        )\n        map_results = {result.pk: result for result in autocomplete_2}\n        self.assertTrue(\"your\" in map_results[\"1\"].text.lower())\n        self.assertTrue(\"mod\" in map_results[\"1\"].text.lower())\n        self.assertTrue(\"your\" in map_results[\"6\"].text.lower())\n        self.assertTrue(\"mod\" in map_results[\"6\"].text.lower())\n        self.assertTrue(\"your\" in map_results[\"2\"].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete_2]), 13)\n\n        # Test multiple fields.\n        autocomplete_3 = self.sqs.autocomplete(text_auto=\"Django\", name_auto=\"dan\")\n        self.assertEqual(autocomplete_3.count(), 4)\n        self.assertEqual(\n            set([result.pk for result in autocomplete_3]), {\"12\", \"1\", \"22\", \"14\"}\n        )\n        self.assertEqual(len([result.pk for result in autocomplete_3]), 4)\n\n        # Test numbers in phrases\n        autocomplete_4 = self.sqs.autocomplete(text_auto=\"Jen 867\")\n        self.assertEqual(autocomplete_4.count(), 1)\n        self.assertEqual(set([result.pk for result in autocomplete_4]), {\"20\"})\n\n        # Test numbers alone\n        autocomplete_4 = self.sqs.autocomplete(text_auto=\"867\")\n        self.assertEqual(autocomplete_4.count(), 1)\n        self.assertEqual(set([result.pk for result in autocomplete_4]), {\"20\"})\n\n\nclass LiveElasticsearch2RoundTripTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.srtsi = Elasticsearch2RoundTripSearchIndex()\n        self.ui.build(indexes=[self.srtsi])\n        connections[\"elasticsearch\"]._index = self.ui\n        
self.sb = connections[\"elasticsearch\"].get_backend()\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        # Fake indexing.\n        mock = MockModel()\n        mock.id = 1\n        self.sb.update(self.srtsi, [mock])\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_round_trip(self):\n        results = self.sqs.filter(id=\"core.mockmodel.1\")\n\n        # Sanity check.\n        self.assertEqual(results.count(), 1)\n\n        # Check the individual fields.\n        result = results[0]\n        self.assertEqual(result.id, \"core.mockmodel.1\")\n        self.assertEqual(result.text, \"This is some example text.\")\n        self.assertEqual(result.name, \"Mister Pants\")\n        self.assertEqual(result.is_active, True)\n        self.assertEqual(result.post_count, 25)\n        self.assertEqual(result.average_rating, 3.6)\n        self.assertEqual(result.price, \"24.99\")\n        self.assertEqual(result.pub_date, datetime.date(2009, 11, 21))\n        self.assertEqual(result.created, datetime.datetime(2009, 11, 21, 21, 31, 00))\n        self.assertEqual(result.tags, [\"staff\", \"outdoor\", \"activist\", \"scientist\"])\n        self.assertEqual(result.sites, [3, 5, 1])\n\n\nclass LiveElasticsearch2PickleTestCase(TestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2MockModelSearchIndex()\n        self.sammi = Elasticsearch2AnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.sammi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        self.smmi.update(using=\"elasticsearch\")\n        
self.sammi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_pickling(self):\n        results = self.sqs.all()\n\n        for res in results:\n            # Make sure the cache is full.\n            pass\n\n        in_a_pickle = pickle.dumps(results)\n        like_a_cuke = pickle.loads(in_a_pickle)\n        self.assertEqual(len(like_a_cuke), len(results))\n        self.assertEqual(like_a_cuke[0].id, results[0].id)\n\n\nclass Elasticsearch2BoostBackendTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2BoostMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        self.sample_objs = []\n\n        for i in range(1, 5):\n            mock = AFourthMockModel()\n            mock.id = i\n\n            if i % 2:\n                mock.author = \"daniel\"\n                mock.editor = \"david\"\n            else:\n                mock.author = \"david\"\n                mock.editor = \"daniel\"\n\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def raw_search(self, query):\n        return self.raw_es.search(\n            q=\"*:*\", index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n        )\n\n    def test_boost(self):\n 
       self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 4)\n\n        results = SearchQuerySet(using=\"elasticsearch\").filter(\n            SQ(author=\"daniel\") | SQ(editor=\"daniel\")\n        )\n\n        self.assertEqual(\n            set([result.id for result in results]),\n            {\n                \"core.afourthmockmodel.4\",\n                \"core.afourthmockmodel.3\",\n                \"core.afourthmockmodel.1\",\n                \"core.afourthmockmodel.2\",\n            },\n        )\n\n    def test__to_python(self):\n        self.assertEqual(self.sb._to_python(\"abc\"), \"abc\")\n        self.assertEqual(self.sb._to_python(\"1\"), 1)\n        self.assertEqual(self.sb._to_python(\"2653\"), 2653)\n        self.assertEqual(self.sb._to_python(\"25.5\"), 25.5)\n        self.assertEqual(self.sb._to_python(\"[1, 2, 3]\"), [1, 2, 3])\n        self.assertEqual(\n            self.sb._to_python('{\"a\": 1, \"b\": 2, \"c\": 3}'), {\"a\": 1, \"c\": 3, \"b\": 2}\n        )\n        self.assertEqual(\n            self.sb._to_python(\"2009-05-09T16:14:00\"),\n            datetime.datetime(2009, 5, 9, 16, 14),\n        )\n        self.assertEqual(\n            self.sb._to_python(\"2009-05-09T00:00:00\"),\n            datetime.datetime(2009, 5, 9, 0, 0),\n        )\n        self.assertEqual(self.sb._to_python(None), None)\n\n\nclass RecreateIndexTestCase(TestCase):\n    def setUp(self):\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n\n    def test_recreate_index(self):\n        clear_elasticsearch_index()\n\n        sb = connections[\"elasticsearch\"].get_backend()\n        sb.silently_fail = True\n        sb.setup()\n\n        original_mapping = self.raw_es.indices.get_mapping(index=sb.index_name)\n\n        sb.clear()\n        sb.setup()\n\n        try:\n            updated_mapping = 
self.raw_es.indices.get_mapping(sb.index_name)\n        except elasticsearch.NotFoundError:\n            self.fail(\"There is no mapping after recreating the index\")\n\n        self.assertEqual(\n            original_mapping,\n            updated_mapping,\n            \"Mapping after recreating the index differs from the original one\",\n        )\n\n\nclass Elasticsearch2FacetingTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch2FacetingMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        # Force the backend to rebuild the mapping each time.\n        self.sb.existing_mapping = {}\n        self.sb.setup()\n\n        self.sample_objs = []\n\n        for i in range(1, 10):\n            mock = AFourthMockModel()\n            mock.id = i\n            if i > 5:\n                mock.editor = \"George Taylor\"\n            else:\n                mock.editor = \"Perry White\"\n            if i % 2:\n                mock.author = \"Daniel Lindsley\"\n            else:\n                mock.author = \"Dan Watson\"\n            mock.pub_date = datetime.date(2013, 9, (i % 4) + 1)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_facet(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .facet(\"author\")\n            .facet(\"editor\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 5), (\"Dan Watson\", 
4)]\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"editor\"], [(\"Perry White\", 5), (\"George Taylor\", 4)]\n        )\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .filter(content=\"white\")\n            .facet(\"facet_field\", order=\"reverse_count\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"facet_field\"], [(\"Dan Watson\", 2), (\"Daniel Lindsley\", 3)]\n        )\n\n    def test_multiple_narrow(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .narrow('editor_exact:\"Perry White\"')\n            .narrow('author_exact:\"Daniel Lindsley\"')\n            .facet(\"author\")\n            .facet_counts()\n        )\n        self.assertEqual(counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 3)])\n\n    def test_narrow(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .facet(\"author\")\n            .facet(\"editor\")\n            .narrow('editor_exact:\"Perry White\"')\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 3), (\"Dan Watson\", 2)]\n        )\n        self.assertEqual(counts[\"fields\"][\"editor\"], [(\"Perry White\", 5)])\n\n    def test_date_facet(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        start = datetime.date(2013, 9, 1)\n        end = datetime.date(2013, 9, 30)\n        # Facet by day\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .date_facet(\"pub_date\", start_date=start, end_date=end, gap_by=\"day\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"dates\"][\"pub_date\"],\n            [\n                (datetime.datetime(2013, 9, 1), 2),\n                
(datetime.datetime(2013, 9, 2), 3),\n                (datetime.datetime(2013, 9, 3), 2),\n                (datetime.datetime(2013, 9, 4), 2),\n            ],\n        )\n        # By month\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .date_facet(\"pub_date\", start_date=start, end_date=end, gap_by=\"month\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"dates\"][\"pub_date\"], [(datetime.datetime(2013, 9, 1), 9)]\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch2_tests/test_inputs.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connections, inputs\n\n\nclass Elasticsearch2InputTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.query_obj = connections[\"elasticsearch\"].get_query()\n\n    def test_raw_init(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {})\n        self.assertEqual(raw.post_process, False)\n\n        raw = inputs.Raw(\"hello OR there, :you\", test=\"really\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {\"test\": \"really\"})\n        self.assertEqual(raw.post_process, False)\n\n    def test_raw_prepare(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.prepare(self.query_obj), \"hello OR there, :you\")\n\n    def test_clean_init(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.query_string, \"hello OR there, :you\")\n        self.assertEqual(clean.post_process, True)\n\n    def test_clean_prepare(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.prepare(self.query_obj), \"hello or there, \\\\:you\")\n\n    def test_exact_init(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.query_string, \"hello OR there, :you\")\n        self.assertEqual(exact.post_process, True)\n\n    def test_exact_prepare(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello OR there, :you\"')\n\n        exact = inputs.Exact(\"hello OR there, :you\", clean=True)\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello or there, \\\\:you\"')\n\n    def test_not_init(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        
self.assertEqual(not_it.query_string, \"hello OR there, :you\")\n        self.assertEqual(not_it.post_process, True)\n\n    def test_not_prepare(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        self.assertEqual(not_it.prepare(self.query_obj), \"NOT (hello or there, \\\\:you)\")\n\n    def test_autoquery_init(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.query_string, 'panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.post_process, False)\n\n    def test_autoquery_prepare(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(\n            autoquery.prepare(self.query_obj), 'panic NOT don\\'t \"froody dude\"'\n        )\n\n    def test_altparser_init(self):\n        altparser = inputs.AltParser(\"dismax\")\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"\")\n        self.assertEqual(altparser.kwargs, {})\n        self.assertEqual(altparser.post_process, False)\n\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"douglas adams\")\n        self.assertEqual(altparser.kwargs, {\"mm\": 1, \"qf\": \"author\"})\n        self.assertEqual(altparser.post_process, False)\n\n    def test_altparser_prepare(self):\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(\n            altparser.prepare(self.query_obj),\n            \"\"\"{!dismax mm=1 qf=author v='douglas adams'}\"\"\",\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch2_tests/test_query.py",
    "content": "import datetime\n\nimport elasticsearch\nfrom django.contrib.gis.measure import D\nfrom django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.inputs import Exact\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, SearchQuerySet\n\nfrom ..core.models import AnotherMockModel, MockModel\n\n\nclass Elasticsearch2SearchQueryTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.sq = connections[\"elasticsearch\"].get_query()\n\n    def test_build_query_all(self):\n        self.assertEqual(self.sq.build_query(), \"*:*\")\n\n    def test_build_query_single_word(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_build_query_boolean(self):\n        self.sq.add_filter(SQ(content=True))\n        self.assertEqual(self.sq.build_query(), \"(True)\")\n\n    def test_regression_slash_search(self):\n        self.sq.add_filter(SQ(content=\"hello/\"))\n        self.assertEqual(self.sq.build_query(), \"(hello\\\\/)\")\n\n    def test_build_query_datetime(self):\n        self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28)))\n        self.assertEqual(self.sq.build_query(), \"(2009-05-08T11:28:00)\")\n\n    def test_build_query_multiple_words_and(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"((hello) AND (world))\")\n\n    def test_build_query_multiple_words_not(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) AND NOT ((world)))\")\n\n    def test_build_query_multiple_words_or(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"hello\"), use_or=True)\n        self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) 
OR (hello))\")\n\n    def test_build_query_multiple_words_mixed(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(content=\"hello\"), use_or=True)\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(\n            self.sq.build_query(), \"(((why) OR (hello)) AND NOT ((world)))\"\n        )\n\n    def test_build_query_phrase(self):\n        self.sq.add_filter(SQ(content=\"hello world\"))\n        self.assertEqual(self.sq.build_query(), \"(hello AND world)\")\n\n        self.sq.add_filter(SQ(content__exact=\"hello world\"))\n        self.assertEqual(\n            self.sq.build_query(), '((hello AND world) AND (\"hello world\"))'\n        )\n\n    def test_build_query_boost(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_boost(\"world\", 5)\n        self.assertEqual(self.sq.build_query(), \"(hello) world^5\")\n\n    def test_build_query_multiple_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=Exact(\"2009-02-10 01:59:00\")))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        self.sq.add_filter(SQ(created__lt=Exact(\"2009-02-12 12:13:00\")))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND pub_date:([* TO \"2009-02-10 01:59:00\"]) AND author:({\"daniel\" TO *}) AND created:({* TO \"2009-02-12 12:13:00\"}) AND title:([\"B\" TO *]) AND id:(\"1\" OR \"2\" OR \"3\") AND rating:([\"3\" TO \"5\"]))',\n        )\n\n    def test_build_query_multiple_filter_types_with_datetimes(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0)))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        
self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13, 0)))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND pub_date:([* TO \"2009-02-10T01:59:00\"]) AND author:({\"daniel\" TO *}) AND created:({* TO \"2009-02-12T12:13:00\"}) AND title:([\"B\" TO *]) AND id:(\"1\" OR \"2\" OR \"3\") AND rating:([\"3\" TO \"5\"]))',\n        )\n\n    def test_build_query_in_filter_multiple_words(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=[\"A Famous Paper\", \"An Infamous Article\"]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND title:(\"A Famous Paper\" OR \"An Infamous Article\"))',\n        )\n\n    def test_build_query_in_filter_datetime(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))\n        self.assertEqual(\n            self.sq.build_query(), '((why) AND pub_date:(\"2009-07-06T01:56:21\"))'\n        )\n\n    def test_build_query_in_with_set(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in={\"A Famous Paper\", \"An Infamous Article\"}))\n        self.assertTrue(\"((why) AND title:(\" in self.sq.build_query())\n        self.assertTrue('\"A Famous Paper\"' in self.sq.build_query())\n        self.assertTrue('\"An Infamous Article\"' in self.sq.build_query())\n\n    def test_build_query_wildcard_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__startswith=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack*))\")\n\n    def test_build_query_fuzzy_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        
self.sq.add_filter(SQ(title__fuzzy=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack~))\")\n\n    def test_clean(self):\n        self.assertEqual(self.sq.clean(\"hello world\"), \"hello world\")\n        self.assertEqual(self.sq.clean(\"hello AND world\"), \"hello and world\")\n        self.assertEqual(\n            self.sq.clean(\n                r'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ \" ~ * ? : \\ / world'\n            ),\n            'hello and or not to \\\\+ \\\\- \\\\&& \\\\|| \\\\! \\\\( \\\\) \\\\{ \\\\} \\\\[ \\\\] \\\\^ \\\\\" \\\\~ \\\\* \\\\? \\\\: \\\\\\\\ \\\\/ world',\n        )\n        self.assertEqual(\n            self.sq.clean(\"so please NOTe i am in a bAND and bORed\"),\n            \"so please NOTe i am in a bAND and bORed\",\n        )\n\n    def test_build_query_with_models(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_model(MockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n        self.sq.add_model(AnotherMockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_set_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n        # Custom class.\n        class IttyBittyResult(object):\n            pass\n\n        self.sq.set_result_class(IttyBittyResult)\n        self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))\n\n        # Reset to default.\n        self.sq.set_result_class(None)\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n    def test_in_filter_values_list(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=[1, 2, 3]))\n        self.assertEqual(self.sq.build_query(), '((why) AND title:(\"1\" OR \"2\" OR \"3\"))')\n\n    def test_narrow_sq(self):\n        sqs = 
SearchQuerySet(using=\"elasticsearch\").narrow(SQ(foo=\"moof\"))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.narrow_queries), 1)\n        self.assertEqual(sqs.query.narrow_queries.pop(), \"foo:(moof)\")\n\n\nclass Elasticsearch2SearchQuerySpatialBeforeReleaseTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.backend = connections[\"elasticsearch\"].get_backend()\n        self._elasticsearch_version = elasticsearch.VERSION\n        elasticsearch.VERSION = (0, 9, 9)\n\n    def tearDown(self):\n        elasticsearch.VERSION = self._elasticsearch_version\n\n    def test_build_query_with_dwithin_range(self):\n        \"\"\"\n        Test build_search_kwargs with dwithin range for Elasticsearch versions < 1.0.0\n        \"\"\"\n        from django.contrib.gis.geos import Point\n\n        search_kwargs = self.backend.build_search_kwargs(\n            \"where\",\n            dwithin={\n                \"field\": \"location_field\",\n                \"point\": Point(1.2345678, 2.3456789),\n                \"distance\": D(m=500),\n            },\n        )\n        self.assertEqual(\n            search_kwargs[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"][1][\n                \"geo_distance\"\n            ],\n            {\"distance\": 0.5, \"location_field\": {\"lat\": 2.3456789, \"lon\": 1.2345678}},\n        )\n\n\nclass Elasticsearch2SearchQuerySpatialAfterReleaseTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.backend = connections[\"elasticsearch\"].get_backend()\n        self._elasticsearch_version = elasticsearch.VERSION\n        elasticsearch.VERSION = (1, 0, 0)\n\n    def tearDown(self):\n        elasticsearch.VERSION = self._elasticsearch_version\n\n    def test_build_query_with_dwithin_range(self):\n        \"\"\"\n        Test build_search_kwargs with dwithin range for Elasticsearch versions >= 1.0.0\n        \"\"\"\n        from 
django.contrib.gis.geos import Point\n\n        search_kwargs = self.backend.build_search_kwargs(\n            \"where\",\n            dwithin={\n                \"field\": \"location_field\",\n                \"point\": Point(1.2345678, 2.3456789),\n                \"distance\": D(m=500),\n            },\n        )\n        self.assertEqual(\n            search_kwargs[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"][1][\n                \"geo_distance\"\n            ],\n            {\n                \"distance\": \"0.500000km\",\n                \"location_field\": {\"lat\": 2.3456789, \"lon\": 1.2345678},\n            },\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch5_tests/__init__.py",
    "content": "import unittest\nimport warnings\n\nfrom django.conf import settings\n\nfrom haystack.utils import log as logging\n\nwarnings.simplefilter(\"ignore\", Warning)\n\n\ndef setup():\n    log = logging.getLogger(\"haystack\")\n    try:\n        import elasticsearch\n\n        if not ((5, 0, 0) <= elasticsearch.__version__ < (6, 0, 0)):\n            raise ImportError\n        from elasticsearch import Elasticsearch, exceptions\n    except ImportError:\n        log.error(\n            \"Skipping ElasticSearch 5 tests: 'elasticsearch>=5.0.0,<6.0.0' not installed.\"\n        )\n        raise unittest.SkipTest(\"'elasticsearch>=5.0.0,<6.0.0' not installed.\")\n\n    url = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n    es = Elasticsearch(url)\n    try:\n        es.info()\n    except exceptions.ConnectionError as e:\n        log.error(\"elasticsearch not running on %r\" % url, exc_info=True)\n        raise unittest.SkipTest(\"elasticsearch not running on %r\" % url, e)\n"
  },
  {
    "path": "test_haystack/elasticsearch5_tests/test_backend.py",
    "content": "import datetime\nimport logging as std_logging\nimport operator\nimport pickle\nimport unittest\nfrom decimal import Decimal\n\nimport elasticsearch\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom haystack import connections, indexes, reset_search_queries\nfrom haystack.exceptions import SkipDocument\nfrom haystack.inputs import AutoQuery\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, RelatedSearchQuerySet, SearchQuerySet\nfrom haystack.utils import log as logging\nfrom haystack.utils.loading import UnifiedIndex\n\nfrom ..core.models import AFourthMockModel, AnotherMockModel, ASixthMockModel, MockModel\nfrom ..mocks import MockSearchResult\n\n\ndef clear_elasticsearch_index():\n    # Wipe it clean.\n    raw_es = elasticsearch.Elasticsearch(\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n    )\n    try:\n        raw_es.indices.delete(\n            index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n        )\n        raw_es.indices.refresh()\n    except elasticsearch.TransportError:\n        pass\n\n    # Since we've just completely deleted the index, we'll reset setup_complete so the next access will\n    # correctly define the mappings:\n    connections[\"elasticsearch\"].get_backend().setup_complete = False\n\n\nclass Elasticsearch5MockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch5MockSearchIndexWithSkipDocument(Elasticsearch5MockSearchIndex):\n    def prepare_text(self, obj):\n        if obj.author == \"daniel3\":\n            raise SkipDocument\n        return \"Indexed!\\n%s\" % obj.id\n\n\nclass 
Elasticsearch5MockSpellingIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    name = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n    def prepare_text(self, obj):\n        return obj.foo\n\n\nclass Elasticsearch5MaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    month = indexes.CharField(indexed=False)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def prepare_month(self, obj):\n        return \"%02d\" % obj.pub_date.month\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch5MockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch5AnotherMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return AnotherMockModel\n\n    def prepare_text(self, obj):\n        return \"You might be searching for the user %s\" % obj.author\n\n\nclass Elasticsearch5BoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(\n        document=True,\n        use_template=True,\n        template_name=\"search/indexes/core/mockmodel_template.txt\",\n    )\n    author = indexes.CharField(model_attr=\"author\", weight=2.0)\n    editor = indexes.CharField(model_attr=\"editor\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return 
AFourthMockModel\n\n    def prepare(self, obj):\n        data = super().prepare(obj)\n\n        if obj.pk == 4:\n            data[\"boost\"] = 5.0\n\n        return data\n\n\nclass Elasticsearch5FacetingMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    author = indexes.CharField(model_attr=\"author\", faceted=True)\n    editor = indexes.CharField(model_attr=\"editor\", faceted=True)\n    pub_date = indexes.DateField(model_attr=\"pub_date\", faceted=True)\n    facet_field = indexes.FacetCharField(model_attr=\"author\")\n\n    def prepare_text(self, obj):\n        return \"%s %s\" % (obj.author, obj.editor)\n\n    def get_model(self):\n        return AFourthMockModel\n\n\nclass Elasticsearch5RoundTripSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField()\n    is_active = indexes.BooleanField()\n    post_count = indexes.IntegerField()\n    average_rating = indexes.FloatField()\n    price = indexes.DecimalField()\n    pub_date = indexes.DateField()\n    created = indexes.DateTimeField()\n    tags = indexes.MultiValueField()\n    sites = indexes.MultiValueField()\n\n    def get_model(self):\n        return MockModel\n\n    def prepare(self, obj):\n        prepped = super().prepare(obj)\n        prepped.update(\n            {\n                \"text\": \"This is some example text.\",\n                \"name\": \"Mister Pants\",\n                \"is_active\": True,\n                \"post_count\": 25,\n                \"average_rating\": 3.6,\n                \"price\": Decimal(\"24.99\"),\n                \"pub_date\": datetime.date(2009, 11, 21),\n                \"created\": datetime.datetime(2009, 11, 21, 21, 31, 00),\n                \"tags\": [\"staff\", \"outdoor\", \"activist\", \"scientist\"],\n                \"sites\": [3, 5, 1],\n            }\n        )\n        return prepped\n\n\nclass 
Elasticsearch5ComplexFacetsMockSearchIndex(\n    indexes.SearchIndex, indexes.Indexable\n):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField(faceted=True)\n    is_active = indexes.BooleanField(faceted=True)\n    post_count = indexes.IntegerField()\n    post_count_i = indexes.FacetIntegerField(facet_for=\"post_count\")\n    average_rating = indexes.FloatField(faceted=True)\n    pub_date = indexes.DateField(faceted=True)\n    created = indexes.DateTimeField(faceted=True)\n    sites = indexes.MultiValueField(faceted=True)\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch5AutocompleteMockModelSearchIndex(\n    indexes.SearchIndex, indexes.Indexable\n):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    text_auto = indexes.EdgeNgramField(model_attr=\"foo\")\n    name_auto = indexes.EdgeNgramField(model_attr=\"author\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass Elasticsearch5SpatialSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"name\", document=True)\n    location = indexes.LocationField()\n\n    def prepare_location(self, obj):\n        return \"%s,%s\" % (obj.lat, obj.lon)\n\n    def get_model(self):\n        return ASixthMockModel\n\n\nclass TestSettings(TestCase):\n    def test_kwargs_are_passed_on(self):\n        from haystack.backends.elasticsearch_backend import ElasticsearchSearchBackend\n\n        backend = ElasticsearchSearchBackend(\n            \"alias\",\n            **{\n                \"URL\": settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"],\n                \"INDEX_NAME\": \"testing\",\n                \"KWARGS\": {\"max_retries\": 42},\n            }\n        )\n\n        self.assertEqual(backend.conn.transport.max_retries, 42)\n\n\nclass 
Elasticsearch5SearchBackendTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5MockSearchIndex()\n        self.smmidni = Elasticsearch5MockSearchIndexWithSkipDocument()\n        self.smtmmi = Elasticsearch5MaintainTypeMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        # Force the backend to rebuild the mapping each time.\n        self.sb.existing_mapping = {}\n        self.sb.setup()\n\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n        self.sb.silently_fail = True\n\n    def raw_search(self, query):\n        try:\n            return self.raw_es.search(\n                q=\"*:*\",\n                index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"],\n            )\n        except elasticsearch.TransportError:\n            return {}\n\n    def test_non_silent(self):\n        bad_sb = connections[\"elasticsearch\"].backend(\n            \"bad\",\n            URL=\"http://omg.wtf.bbq:1000/\",\n            INDEX_NAME=\"whatver\",\n            SILENTLY_FAIL=False,\n            TIMEOUT=1,\n        )\n\n        try:\n            bad_sb.update(self.smmi, self.sample_objs)\n            
self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.remove(\"core.mockmodel.1\")\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.clear()\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.search(\"foo\")\n            self.fail()\n        except:\n            pass\n\n    def test_update_no_documents(self):\n        url = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        index_name = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n\n        sb = connections[\"elasticsearch\"].backend(\n            \"elasticsearch\", URL=url, INDEX_NAME=index_name, SILENTLY_FAIL=True\n        )\n        self.assertEqual(sb.update(self.smmi, []), None)\n\n        sb = connections[\"elasticsearch\"].backend(\n            \"elasticsearch\", URL=url, INDEX_NAME=index_name, SILENTLY_FAIL=False\n        )\n        try:\n            sb.update(self.smmi, [])\n            self.fail()\n        except:\n            pass\n\n    def test_update(self):\n        self.sb.update(self.smmi, self.sample_objs)\n\n        # Check what Elasticsearch thinks is there.\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n        self.assertEqual(\n            sorted(\n                [res[\"_source\"] for res in self.raw_search(\"*:*\")[\"hits\"][\"hits\"]],\n                key=lambda x: x[\"id\"],\n            ),\n            [\n                {\n                    \"django_id\": \"1\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel1\",\n                    \"name_exact\": \"daniel1\",\n                    \"text\": \"Indexed!\\n1\",\n                    \"pub_date\": \"2009-02-24T00:00:00\",\n                    \"id\": \"core.mockmodel.1\",\n                },\n                {\n                    \"django_id\": \"2\",\n                    \"django_ct\": 
\"core.mockmodel\",\n                    \"name\": \"daniel2\",\n                    \"name_exact\": \"daniel2\",\n                    \"text\": \"Indexed!\\n2\",\n                    \"pub_date\": \"2009-02-23T00:00:00\",\n                    \"id\": \"core.mockmodel.2\",\n                },\n                {\n                    \"django_id\": \"3\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel3\",\n                    \"name_exact\": \"daniel3\",\n                    \"text\": \"Indexed!\\n3\",\n                    \"pub_date\": \"2009-02-22T00:00:00\",\n                    \"id\": \"core.mockmodel.3\",\n                },\n            ],\n        )\n\n    def test_update_with_SkipDocument_raised(self):\n        self.sb.update(self.smmidni, self.sample_objs)\n\n        # Check what Elasticsearch thinks is there.\n        res = self.raw_search(\"*:*\")[\"hits\"]\n        self.assertEqual(res[\"total\"], 2)\n        self.assertListEqual(\n            sorted([x[\"_source\"][\"id\"] for x in res[\"hits\"]]),\n            [\"core.mockmodel.1\", \"core.mockmodel.2\"],\n        )\n\n    def test_remove(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        self.sb.remove(self.sample_objs[0])\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 2)\n        self.assertEqual(\n            sorted(\n                [res[\"_source\"] for res in self.raw_search(\"*:*\")[\"hits\"][\"hits\"]],\n                key=operator.itemgetter(\"django_id\"),\n            ),\n            [\n                {\n                    \"django_id\": \"2\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel2\",\n                    \"name_exact\": \"daniel2\",\n                    \"text\": \"Indexed!\\n2\",\n                    \"pub_date\": \"2009-02-23T00:00:00\",\n                    
\"id\": \"core.mockmodel.2\",\n                },\n                {\n                    \"django_id\": \"3\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel3\",\n                    \"name_exact\": \"daniel3\",\n                    \"text\": \"Indexed!\\n3\",\n                    \"pub_date\": \"2009-02-22T00:00:00\",\n                    \"id\": \"core.mockmodel.3\",\n                },\n            ],\n        )\n\n    def test_remove_succeeds_on_404(self):\n        self.sb.silently_fail = False\n        self.sb.remove(\"core.mockmodel.421\")\n\n    def test_clear(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear()\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([AnotherMockModel])\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([MockModel])\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([AnotherMockModel, MockModel])\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n    def test_search(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            set([result.pk for result in 
self.sb.search(\"*:*\")[\"results\"]]),\n            {\"2\", \"1\", \"3\"},\n        )\n\n        self.assertEqual(self.sb.search(\"\", highlight=True), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"Index\", highlight=True)[\"hits\"], 3)\n        self.assertEqual(\n            sorted(\n                [\n                    result.highlighted[0]\n                    for result in self.sb.search(\"Index\", highlight=True)[\"results\"]\n                ]\n            ),\n            [\"<em>Indexed</em>!\\n1\", \"<em>Indexed</em>!\\n2\", \"<em>Indexed</em>!\\n3\"],\n        )\n\n        self.assertEqual(self.sb.search(\"Indx\")[\"hits\"], 0)\n        self.assertEqual(self.sb.search(\"indaxed\")[\"spelling_suggestion\"], \"indexed\")\n        self.assertEqual(\n            self.sb.search(\"arf\", spelling_query=\"indexyd\")[\"spelling_suggestion\"],\n            \"indexed\",\n        )\n\n        self.assertEqual(\n            self.sb.search(\"\", facets={\"name\": {}}), {\"hits\": 0, \"results\": []}\n        )\n        results = self.sb.search(\"Index\", facets={\"name\": {}})\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertSetEqual(\n            set(results[\"facets\"][\"fields\"][\"name\"]),\n            {(\"daniel3\", 1), (\"daniel2\", 1), (\"daniel1\", 1)},\n        )\n\n        self.assertEqual(\n            self.sb.search(\n                \"\",\n                date_facets={\n                    \"pub_date\": {\n                        \"start_date\": datetime.date(2008, 1, 1),\n                        \"end_date\": datetime.date(2009, 4, 1),\n                        \"gap_by\": \"month\",\n                        \"gap_amount\": 1,\n                    }\n                },\n            ),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\n            \"Index\",\n            date_facets={\n                \"pub_date\": {\n                    \"start_date\": 
datetime.date(2008, 1, 1),\n                    \"end_date\": datetime.date(2009, 4, 1),\n                    \"gap_by\": \"month\",\n                    \"gap_amount\": 1,\n                }\n            },\n        )\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(\n            results[\"facets\"][\"dates\"][\"pub_date\"],\n            [(datetime.datetime(2009, 2, 1, 0, 0), 3)],\n        )\n\n        self.assertEqual(\n            self.sb.search(\"\", query_facets=[(\"name\", \"[* TO e]\")]),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\"Index\", query_facets=[(\"name\", \"[* TO e]\")])\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(results[\"facets\"][\"queries\"], {\"name\": 3})\n\n        self.assertEqual(\n            self.sb.search(\"\", narrow_queries={\"name:daniel1\"}),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\"Index\", narrow_queries={\"name:daniel1\"})\n        self.assertEqual(results[\"hits\"], 1)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                self.sb.search(\"index\", result_class=MockSearchResult)[\"results\"][0],\n                MockSearchResult,\n            )\n        )\n\n        # Check the use of ``limit_to_registered_models``.\n        self.assertEqual(\n            self.sb.search(\"\", limit_to_registered_models=False),\n            {\"hits\": 0, \"results\": []},\n        )\n        self.assertEqual(\n            self.sb.search(\"*:*\", limit_to_registered_models=False)[\"hits\"], 3\n        )\n        self.assertEqual(\n            sorted(\n                [\n                    result.pk\n                    for result in self.sb.search(\n                        \"*:*\", limit_to_registered_models=False\n                    )[\"results\"]\n                ]\n            ),\n            [\"1\", \"2\", \"3\"],\n 
       )\n\n        # Stow.\n        old_limit_to_registered_models = getattr(\n            settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n        )\n        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            sorted([result.pk for result in self.sb.search(\"*:*\")[\"results\"]]),\n            [\"1\", \"2\", \"3\"],\n        )\n\n        # Restore.\n        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models\n\n    def test_spatial_search_parameters(self):\n        from django.contrib.gis.geos import Point\n\n        p1 = Point(1.23, 4.56)\n        kwargs = self.sb.build_search_kwargs(\n            \"*:*\",\n            distance_point={\"field\": \"location\", \"point\": p1},\n            sort_by=((\"distance\", \"desc\"),),\n        )\n\n        self.assertIn(\"sort\", kwargs)\n        self.assertEqual(1, len(kwargs[\"sort\"]))\n        geo_d = kwargs[\"sort\"][0][\"_geo_distance\"]\n\n        # ElasticSearch supports the GeoJSON-style lng, lat pairs so unlike Solr the values should be\n        # in the same order as we used to create the Point():\n        # http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-filter.html#_lat_lon_as_array_4\n\n        self.assertDictEqual(\n            geo_d, {\"location\": [1.23, 4.56], \"unit\": \"km\", \"order\": \"desc\"}\n        )\n\n    def test_more_like_this(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        # A functional MLT example with enough data to work is below. 
Rely on\n        # this to ensure the API is correct enough.\n        self.assertEqual(self.sb.more_like_this(self.sample_objs[0])[\"hits\"], 0)\n        self.assertEqual(\n            [\n                result.pk\n                for result in self.sb.more_like_this(self.sample_objs[0])[\"results\"]\n            ],\n            [],\n        )\n\n    def test_build_schema(self):\n        old_ui = connections[\"elasticsearch\"].get_unified_index()\n\n        (content_field_name, mapping) = self.sb.build_schema(old_ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n        self.assertEqual(len(mapping), 4 + 2)  # +2 management fields\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"name_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n            },\n        )\n\n        ui = UnifiedIndex()\n        ui.build(indexes=[Elasticsearch5ComplexFacetsMockSearchIndex()])\n        (content_field_name, mapping) = self.sb.build_schema(ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n        self.assertEqual(len(mapping), 15 + 2)  # +2 management fields\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": 
False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"is_active_exact\": {\"type\": \"boolean\"},\n                \"created\": {\"type\": \"date\"},\n                \"post_count\": {\"type\": \"long\"},\n                \"created_exact\": {\"type\": \"date\"},\n                \"sites_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n                \"is_active\": {\"type\": \"boolean\"},\n                \"sites\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"post_count_i\": {\"type\": \"long\"},\n                \"average_rating\": {\"type\": \"float\"},\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date_exact\": {\"type\": \"date\"},\n                \"name_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"average_rating_exact\": {\"type\": \"float\"},\n            },\n        )\n\n    def test_verify_type(self):\n        old_ui = connections[\"elasticsearch\"].get_unified_index()\n        ui = UnifiedIndex()\n        smtmmi = Elasticsearch5MaintainTypeMockSearchIndex()\n        ui.build(indexes=[smtmmi])\n        connections[\"elasticsearch\"]._index = ui\n        sb = connections[\"elasticsearch\"].get_backend()\n        sb.update(smtmmi, self.sample_objs)\n\n        self.assertEqual(sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            [result.month for result in sb.search(\"*:*\")[\"results\"]], [\"02\", \"02\", \"02\"]\n        )\n        connections[\"elasticsearch\"]._index = old_ui\n\n\nclass CaptureHandler(std_logging.Handler):\n    logs_seen = []\n\n    def emit(self, record):\n        
CaptureHandler.logs_seen.append(record)\n\n\nclass FailedElasticsearch5SearchBackendTestCase(TestCase):\n    def setUp(self):\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n        # Stow.\n        # Point the backend at a URL that doesn't exist so we can watch the\n        # sparks fly.\n        self.old_es_url = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"] = (\n            \"%s/foo/\" % self.old_es_url\n        )\n        self.cap = CaptureHandler()\n        logging.getLogger(\"haystack\").addHandler(self.cap)\n        config = apps.get_app_config(\"haystack\")\n        logging.getLogger(\"haystack\").removeHandler(config.stream)\n\n        # Setup the rest of the bits.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        ui = UnifiedIndex()\n        self.smmi = Elasticsearch5MockSearchIndex()\n        ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n    def tearDown(self):\n        # Restore.\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"] = self.old_es_url\n        connections[\"elasticsearch\"]._index = self.old_ui\n        config = apps.get_app_config(\"haystack\")\n        logging.getLogger(\"haystack\").removeHandler(self.cap)\n        logging.getLogger(\"haystack\").addHandler(config.stream)\n\n    @unittest.expectedFailure\n    def test_all_cases(self):\n        # Prior to the addition of the try/except bits, these would all fail miserably.\n        self.assertEqual(len(CaptureHandler.logs_seen), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        
self.assertEqual(len(CaptureHandler.logs_seen), 1)\n\n        self.sb.remove(self.sample_objs[0])\n        self.assertEqual(len(CaptureHandler.logs_seen), 2)\n\n        self.sb.search(\"search\")\n        self.assertEqual(len(CaptureHandler.logs_seen), 3)\n\n        self.sb.more_like_this(self.sample_objs[0])\n        self.assertEqual(len(CaptureHandler.logs_seen), 4)\n\n        self.sb.clear([MockModel])\n        self.assertEqual(len(CaptureHandler.logs_seen), 5)\n\n        self.sb.clear()\n        self.assertEqual(len(CaptureHandler.logs_seen), 6)\n\n\nclass LiveElasticsearch5SearchQueryTestCase(TestCase):\n    fixtures = [\"base_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5MockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sq = connections[\"elasticsearch\"].get_query()\n\n        # Force indexing of the content.\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_log_query(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        with self.settings(DEBUG=False):\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        with self.settings(DEBUG=True):\n            # Redefine it to clear out the cached results.\n            self.sq = connections[\"elasticsearch\"].query(using=\"elasticsearch\")\n            self.sq.add_filter(SQ(name=\"bar\"))\n            len(self.sq.get_results())\n            
self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n\n            # And again, for good measure.\n            self.sq = connections[\"elasticsearch\"].query(\"elasticsearch\")\n            self.sq.add_filter(SQ(name=\"bar\"))\n            self.sq.add_filter(SQ(text=\"moof\"))\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[1][\"query_string\"],\n                \"(name:(bar) AND text:(moof))\",\n            )\n\n\nlssqstc_all_loaded = None\n\n\n@override_settings(DEBUG=True)\nclass LiveElasticsearch5SearchQuerySetTestCase(TestCase):\n    \"\"\"Used to test actual implementation details of the SearchQuerySet.\"\"\"\n\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5MockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n        self.rsqs = RelatedSearchQuerySet(\"elasticsearch\")\n\n        # Ugly but not constantly reindexing saves us almost 50% runtime.\n        global lssqstc_all_loaded\n\n        if lssqstc_all_loaded is None:\n            lssqstc_all_loaded = True\n\n            # Wipe it clean.\n            clear_elasticsearch_index()\n\n            # Force indexing of the content.\n            self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        
connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_load_all(self):\n        sqs = self.sqs.order_by(\"pub_date\").load_all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(len(sqs) > 0)\n        self.assertEqual(\n            sqs[2].object.foo,\n            \"In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.\",\n        )\n\n    def test_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.sqs.all()\n        results = sorted([int(result.pk) for result in sqs])\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_slice(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            [int(result.pk) for result in results[1:11]],\n            [3, 2, 4, 5, 6, 7, 8, 9, 10, 11],\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all().order_by(\"pub_date\")\n        self.assertEqual(int(results[21].pk), 22)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_values_slicing(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        # 
TODO: this would be a good candidate for refactoring into a TestCase subclass shared across backends\n\n        # The values will come back as strings because Haystack doesn't assume PKs are integers.\n        # We'll prepare this set once since we're going to query the same results in multiple ways:\n        expected_pks = [str(i) for i in [3, 2, 4, 5, 6, 7, 8, 9, 10, 11]]\n\n        results = self.sqs.all().order_by(\"pub_date\").values(\"pk\")\n        self.assertListEqual([i[\"pk\"] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\")\n        self.assertListEqual([i[0] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\", flat=True)\n        self.assertListEqual(results[1:11], expected_pks)\n\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_count(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.sqs.all()\n        self.assertEqual(sqs.count(), 23)\n        self.assertEqual(sqs.count(), 23)\n        self.assertEqual(len(sqs), 23)\n        self.assertEqual(sqs.count(), 23)\n        # Should only execute one query to count the length of the result set.\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_manual_iter(self):\n        results = self.sqs.all()\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = set([int(result.pk) for result in results._manual_iter()])\n        self.assertEqual(\n            results,\n            {\n                2,\n                7,\n                12,\n                17,\n                1,\n                6,\n                11,\n                16,\n                23,\n                5,\n                10,\n                15,\n                22,\n              
  4,\n                9,\n                14,\n                19,\n                21,\n                3,\n                8,\n                13,\n                18,\n                20,\n            },\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_fill_cache(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all()\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 10\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n        results._fill_cache(10, 20)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 20\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n\n    def test_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        self.assertEqual(self.sqs._cache_is_full(), False)\n        results = self.sqs.all()\n        fire_the_iterator_and_fill_cache = [result for result in results]\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test___and__(self):\n        sqs1 = self.sqs.filter(content=\"foo\")\n        sqs2 = self.sqs.filter(content=\"bar\")\n        sqs = sqs1 & sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(sqs.query.build_query(), \"((foo) AND (bar))\")\n\n        # Now for something more complex...\n        sqs3 = self.sqs.exclude(title=\"moof\").filter(\n            
SQ(content=\"foo\") | SQ(content=\"baz\")\n        )\n        sqs4 = self.sqs.filter(content=\"bar\")\n        sqs = sqs3 & sqs4\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 3)\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"(NOT (title:(moof)) AND ((foo) OR (baz)) AND (bar))\",\n        )\n\n    def test___or__(self):\n        sqs1 = self.sqs.filter(content=\"foo\")\n        sqs2 = self.sqs.filter(content=\"bar\")\n        sqs = sqs1 | sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(sqs.query.build_query(), \"((foo) OR (bar))\")\n\n        # Now for something more complex...\n        sqs3 = self.sqs.exclude(title=\"moof\").filter(\n            SQ(content=\"foo\") | SQ(content=\"baz\")\n        )\n        sqs4 = self.sqs.filter(content=\"bar\").models(MockModel)\n        sqs = sqs3 | sqs4\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"((NOT (title:(moof)) AND ((foo) OR (baz))) OR (bar))\",\n        )\n\n    def test_auto_query(self):\n        # Ensure bits in exact matches get escaped properly as well.\n        # This will break horrifically if escaping isn't working.\n        sqs = self.sqs.auto_query('\"pants:rule\"')\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter), '<SQ: AND content__content=\"pants:rule\">'\n        )\n        self.assertEqual(sqs.query.build_query(), '(\"pants\\\\:rule\")')\n        self.assertEqual(len(sqs), 0)\n\n    # Regressions\n\n    def test_regression_proper_start_offsets(self):\n        sqs = self.sqs.filter(text=\"index\")\n        self.assertNotEqual(sqs.count(), 0)\n\n        id_counts = {}\n\n        for 
item in sqs:\n            if item.id in id_counts:\n                id_counts[item.id] += 1\n            else:\n                id_counts[item.id] = 1\n\n        for key, value in id_counts.items():\n            if value > 1:\n                self.fail(\n                    \"Result with id '%s' seen more than once in the results.\" % key\n                )\n\n    def test_regression_raw_search_breaks_slicing(self):\n        sqs = self.sqs.raw_search(\"text:index\")\n        page_1 = [result.pk for result in sqs[0:10]]\n        page_2 = [result.pk for result in sqs[10:20]]\n\n        for pk in page_2:\n            if pk in page_1:\n                self.fail(\n                    \"Result with id '%s' seen more than once in the results.\" % pk\n                )\n\n    # RelatedSearchQuerySet Tests\n\n    def test_related_load_all(self):\n        sqs = self.rsqs.order_by(\"pub_date\").load_all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(len(sqs) > 0)\n        self.assertEqual(\n            sqs[2].object.foo,\n            \"In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. 
You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.\",\n        )\n\n    def test_related_load_all_queryset(self):\n        sqs = self.rsqs.load_all().order_by(\"pub_date\")\n        self.assertEqual(len(sqs._load_all_querysets), 0)\n\n        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=1))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs._load_all_querysets), 1)\n        self.assertEqual(sorted([obj.object.id for obj in sqs]), list(range(2, 24)))\n\n        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=10))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs._load_all_querysets), 1)\n        self.assertEqual(\n            set([obj.object.id for obj in sqs]),\n            {12, 17, 11, 16, 23, 15, 22, 14, 19, 21, 13, 18, 20},\n        )\n        self.assertEqual(set([obj.object.id for obj in sqs[10:20]]), {21, 22, 23})\n\n    def test_related_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.rsqs.all()\n        results = set([int(result.pk) for result in sqs])\n        self.assertEqual(\n            results,\n            {\n                2,\n                7,\n                12,\n                17,\n                1,\n                6,\n                11,\n                16,\n                23,\n                5,\n                10,\n                15,\n                22,\n                4,\n                9,\n                14,\n                19,\n                21,\n                3,\n                8,\n                13,\n                18,\n                20,\n            },\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_related_slice(self):\n        reset_search_queries()\n        
self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            [int(result.pk) for result in results[1:11]],\n            [3, 2, 4, 5, 6, 7, 8, 9, 10, 11],\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(int(results[21].pk), 22)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            set([int(result.pk) for result in results[20:30]]), {21, 22, 23}\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_related_manual_iter(self):\n        results = self.rsqs.all()\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = sorted([int(result.pk) for result in results._manual_iter()])\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_related_fill_cache(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all()\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 10\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n        results._fill_cache(10, 20)\n        
self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 20\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n\n    def test_related_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        self.assertEqual(self.rsqs._cache_is_full(), False)\n        results = self.rsqs.all()\n        fire_the_iterator_and_fill_cache = [result for result in results]\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_quotes_regression(self):\n        sqs = self.sqs.auto_query(\"44°48'40''N 20°28'32''E\")\n        # Should not have empty terms.\n        self.assertEqual(sqs.query.build_query(), \"(44\\xb048'40''N 20\\xb028'32''E)\")\n        # Should not cause Elasticsearch to 500.\n        self.assertEqual(sqs.count(), 0)\n\n        sqs = self.sqs.auto_query(\"blazing\")\n        self.assertEqual(sqs.query.build_query(), \"(blazing)\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"blazing saddles\")\n        self.assertEqual(sqs.query.build_query(), \"(blazing saddles)\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles')\n        self.assertEqual(sqs.query.build_query(), '(\\\\\"blazing saddles)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing \\'saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing \\'saddles\")')\n        
self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\")\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"'\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\" ')\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"'\\\"\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\" '\\\\\\\")\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\" mel')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\" mel)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\" mel brooks')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\" mel brooks)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\" brooks')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\" brooks)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\" \"brooks')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\" \\\\\"brooks)')\n        self.assertEqual(sqs.count(), 0)\n\n    def test_query_generation(self):\n        sqs = self.sqs.filter(\n            SQ(content=AutoQuery(\"hello world\")) | SQ(title=AutoQuery(\"hello world\"))\n        )\n        self.assertEqual(\n            sqs.query.build_query(), \"((hello world) OR title:(hello world))\"\n        )\n\n    def test_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        sqs = self.sqs.all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n        # Custom class.\n        sqs = 
self.sqs.result_class(MockSearchResult).all()\n        self.assertTrue(isinstance(sqs[0], MockSearchResult))\n\n        # Reset to default.\n        sqs = self.sqs.result_class(None).all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n\n@override_settings(DEBUG=True)\nclass LiveElasticsearch5SpellingTestCase(TestCase):\n    \"\"\"Used to test actual implementation details of the SearchQuerySet.\"\"\"\n\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5MockSpellingIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Reboot the schema.\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sb.setup()\n\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_spelling(self):\n        self.assertEqual(\n            self.sqs.auto_query(\"structurd\").spelling_suggestion(), \"structured\"\n        )\n        self.assertEqual(self.sqs.spelling_suggestion(\"structurd\"), \"structured\")\n        self.assertEqual(\n            self.sqs.auto_query(\"srchindex instanc\").spelling_suggestion(),\n            \"searchindex instance\",\n        )\n        self.assertEqual(\n            self.sqs.spelling_suggestion(\"srchindex instanc\"), \"searchindex instance\"\n        )\n\n\nclass LiveElasticsearch5MoreLikeThisTestCase(TestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        self.old_ui = 
connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5MockModelSearchIndex()\n        self.sammi = Elasticsearch5AnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.sammi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        self.smmi.update(using=\"elasticsearch\")\n        self.sammi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_more_like_this(self):\n        mlt = self.sqs.more_like_this(MockModel.objects.get(pk=1))\n        results = [result.pk for result in mlt]\n        self.assertEqual(mlt.count(), 11)\n        self.assertEqual(\n            set(results), {\"10\", \"5\", \"2\", \"21\", \"4\", \"6\", \"16\", \"9\", \"14\"}\n        )\n        self.assertEqual(len(results), 10)\n\n        alt_mlt = self.sqs.filter(name=\"daniel3\").more_like_this(\n            MockModel.objects.get(pk=2)\n        )\n        results = [result.pk for result in alt_mlt]\n        self.assertEqual(alt_mlt.count(), 9)\n        self.assertEqual(\n            set(results), {\"2\", \"16\", \"3\", \"19\", \"4\", \"17\", \"10\", \"22\", \"23\"}\n        )\n        self.assertEqual(len(results), 9)\n\n        alt_mlt_with_models = self.sqs.models(MockModel).more_like_this(\n            MockModel.objects.get(pk=1)\n        )\n        results = [result.pk for result in alt_mlt_with_models]\n        self.assertEqual(alt_mlt_with_models.count(), 10)\n        self.assertEqual(\n            set(results), {\"10\", \"5\", \"21\", \"2\", \"4\", \"6\", \"23\", \"9\", \"14\", \"16\"}\n        )\n        self.assertEqual(len(results), 10)\n\n        if hasattr(MockModel.objects, \"defer\"):\n            # Make sure MLT works with deferred bits.\n            qs = MockModel.objects.defer(\"foo\")\n         
   self.assertEqual(qs.query.deferred_loading[1], True)\n            deferred = self.sqs.models(MockModel).more_like_this(qs.get(pk=1))\n            self.assertEqual(deferred.count(), 10)\n            self.assertEqual(\n                {result.pk for result in deferred},\n                {\"10\", \"5\", \"21\", \"2\", \"4\", \"6\", \"23\", \"9\", \"14\", \"16\"},\n            )\n            self.assertEqual(len([result.pk for result in deferred]), 10)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                self.sqs.result_class(MockSearchResult).more_like_this(\n                    MockModel.objects.get(pk=1)\n                )[0],\n                MockSearchResult,\n            )\n        )\n\n\nclass LiveElasticsearch5AutocompleteTestCase(TestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5AutocompleteMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Reboot the schema.\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sb.setup()\n\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_build_schema(self):\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        content_name, mapping = self.sb.build_schema(self.ui.all_searchfields())\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                  
  \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"name_auto\": {\"type\": \"string\", \"analyzer\": \"edgengram_analyzer\"},\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"text_auto\": {\"type\": \"string\", \"analyzer\": \"edgengram_analyzer\"},\n            },\n        )\n\n    def test_autocomplete(self):\n        autocomplete = self.sqs.autocomplete(text_auto=\"mod\")\n        self.assertEqual(autocomplete.count(), 16)\n        self.assertEqual(\n            set([result.pk for result in autocomplete]),\n            {\n                \"1\",\n                \"12\",\n                \"6\",\n                \"14\",\n                \"7\",\n                \"4\",\n                \"23\",\n                \"17\",\n                \"13\",\n                \"18\",\n                \"20\",\n                \"22\",\n                \"19\",\n                \"15\",\n                \"10\",\n                \"2\",\n            },\n        )\n        self.assertTrue(\"mod\" in autocomplete[0].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[1].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[6].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[9].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[13].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete]), 16)\n\n        # Test multiple words.\n        autocomplete_2 = self.sqs.autocomplete(text_auto=\"your mod\")\n        self.assertEqual(autocomplete_2.count(), 13)\n        self.assertEqual(\n            
set([result.pk for result in autocomplete_2]),\n            {\"1\", \"6\", \"2\", \"14\", \"12\", \"13\", \"10\", \"19\", \"4\", \"20\", \"23\", \"22\", \"15\"},\n        )\n        map_results = {result.pk: result for result in autocomplete_2}\n        self.assertTrue(\"your\" in map_results[\"1\"].text.lower())\n        self.assertTrue(\"mod\" in map_results[\"1\"].text.lower())\n        self.assertTrue(\"your\" in map_results[\"6\"].text.lower())\n        self.assertTrue(\"mod\" in map_results[\"6\"].text.lower())\n        self.assertTrue(\"your\" in map_results[\"2\"].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete_2]), 13)\n\n        # Test multiple fields.\n        autocomplete_3 = self.sqs.autocomplete(text_auto=\"Django\", name_auto=\"dan\")\n        self.assertEqual(autocomplete_3.count(), 4)\n        self.assertEqual(\n            set([result.pk for result in autocomplete_3]), {\"12\", \"1\", \"22\", \"14\"}\n        )\n        self.assertEqual(len([result.pk for result in autocomplete_3]), 4)\n\n        # Test numbers in phrases\n        autocomplete_4 = self.sqs.autocomplete(text_auto=\"Jen 867\")\n        self.assertEqual(autocomplete_4.count(), 1)\n        self.assertEqual(set([result.pk for result in autocomplete_4]), {\"20\"})\n\n        # Test numbers alone\n        autocomplete_4 = self.sqs.autocomplete(text_auto=\"867\")\n        self.assertEqual(autocomplete_4.count(), 1)\n        self.assertEqual(set([result.pk for result in autocomplete_4]), {\"20\"})\n\n\nclass LiveElasticsearch5RoundTripTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.srtsi = Elasticsearch5RoundTripSearchIndex()\n        self.ui.build(indexes=[self.srtsi])\n        connections[\"elasticsearch\"]._index = self.ui\n        
self.sb = connections[\"elasticsearch\"].get_backend()\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        # Fake indexing.\n        mock = MockModel()\n        mock.id = 1\n        self.sb.update(self.srtsi, [mock])\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_round_trip(self):\n        results = self.sqs.filter(id=\"core.mockmodel.1\")\n\n        # Sanity check.\n        self.assertEqual(results.count(), 1)\n\n        # Check the individual fields.\n        result = results[0]\n        self.assertEqual(result.id, \"core.mockmodel.1\")\n        self.assertEqual(result.text, \"This is some example text.\")\n        self.assertEqual(result.name, \"Mister Pants\")\n        self.assertEqual(result.is_active, True)\n        self.assertEqual(result.post_count, 25)\n        self.assertEqual(result.average_rating, 3.6)\n        self.assertEqual(result.price, \"24.99\")\n        self.assertEqual(result.pub_date, datetime.date(2009, 11, 21))\n        self.assertEqual(result.created, datetime.datetime(2009, 11, 21, 21, 31, 00))\n        self.assertEqual(result.tags, [\"staff\", \"outdoor\", \"activist\", \"scientist\"])\n        self.assertEqual(result.sites, [3, 5, 1])\n\n\nclass LiveElasticsearch5PickleTestCase(TestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5MockModelSearchIndex()\n        self.sammi = Elasticsearch5AnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.sammi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        self.smmi.update(using=\"elasticsearch\")\n        
self.sammi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_pickling(self):\n        results = self.sqs.all()\n\n        for res in results:\n            # Make sure the cache is full.\n            pass\n\n        in_a_pickle = pickle.dumps(results)\n        like_a_cuke = pickle.loads(in_a_pickle)\n        self.assertEqual(len(like_a_cuke), len(results))\n        self.assertEqual(like_a_cuke[0].id, results[0].id)\n\n\nclass Elasticsearch5BoostBackendTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5BoostMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        self.sample_objs = []\n\n        for i in range(1, 5):\n            mock = AFourthMockModel()\n            mock.id = i\n\n            if i % 2:\n                mock.author = \"daniel\"\n                mock.editor = \"david\"\n            else:\n                mock.author = \"david\"\n                mock.editor = \"daniel\"\n\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def raw_search(self, query):\n        return self.raw_es.search(\n            q=\"*:*\", index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n        )\n\n    def test_boost(self):\n 
       self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 4)\n\n        results = SearchQuerySet(using=\"elasticsearch\").filter(\n            SQ(author=\"daniel\") | SQ(editor=\"daniel\")\n        )\n\n        self.assertEqual(\n            set([result.id for result in results]),\n            {\n                \"core.afourthmockmodel.4\",\n                \"core.afourthmockmodel.3\",\n                \"core.afourthmockmodel.1\",\n                \"core.afourthmockmodel.2\",\n            },\n        )\n\n    def test__to_python(self):\n        self.assertEqual(self.sb._to_python(\"abc\"), \"abc\")\n        self.assertEqual(self.sb._to_python(\"1\"), 1)\n        self.assertEqual(self.sb._to_python(\"2653\"), 2653)\n        self.assertEqual(self.sb._to_python(\"25.5\"), 25.5)\n        self.assertEqual(self.sb._to_python(\"[1, 2, 3]\"), [1, 2, 3])\n        self.assertEqual(\n            self.sb._to_python('{\"a\": 1, \"b\": 2, \"c\": 3}'), {\"a\": 1, \"c\": 3, \"b\": 2}\n        )\n        self.assertEqual(\n            self.sb._to_python(\"2009-05-09T16:14:00\"),\n            datetime.datetime(2009, 5, 9, 16, 14),\n        )\n        self.assertEqual(\n            self.sb._to_python(\"2009-05-09T00:00:00\"),\n            datetime.datetime(2009, 5, 9, 0, 0),\n        )\n        self.assertEqual(self.sb._to_python(None), None)\n\n\nclass RecreateIndexTestCase(TestCase):\n    def setUp(self):\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n\n    def test_recreate_index(self):\n        clear_elasticsearch_index()\n\n        sb = connections[\"elasticsearch\"].get_backend()\n        sb.silently_fail = True\n        sb.setup()\n\n        original_mapping = self.raw_es.indices.get_mapping(index=sb.index_name)\n\n        sb.clear()\n        sb.setup()\n\n        try:\n            updated_mapping = 
self.raw_es.indices.get_mapping(sb.index_name)\n        except elasticsearch.NotFoundError:\n            self.fail(\"There is no mapping after recreating the index\")\n\n        self.assertEqual(\n            original_mapping,\n            updated_mapping,\n            \"Mapping after recreating the index differs from the original one\",\n        )\n\n\nclass Elasticsearch5FacetingTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = Elasticsearch5FacetingMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        # Force the backend to rebuild the mapping each time.\n        self.sb.existing_mapping = {}\n        self.sb.setup()\n\n        self.sample_objs = []\n\n        for i in range(1, 10):\n            mock = AFourthMockModel()\n            mock.id = i\n            if i > 5:\n                mock.editor = \"George Taylor\"\n            else:\n                mock.editor = \"Perry White\"\n            if i % 2:\n                mock.author = \"Daniel Lindsley\"\n            else:\n                mock.author = \"Dan Watson\"\n            mock.pub_date = datetime.date(2013, 9, (i % 4) + 1)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_facet(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .facet(\"author\")\n            .facet(\"editor\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 5), (\"Dan Watson\", 
4)]\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"editor\"], [(\"Perry White\", 5), (\"George Taylor\", 4)]\n        )\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .filter(content=\"white\")\n            .facet(\"facet_field\", order=\"reverse_count\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"facet_field\"], [(\"Dan Watson\", 2), (\"Daniel Lindsley\", 3)]\n        )\n\n    def test_multiple_narrow(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .narrow('editor_exact:\"Perry White\"')\n            .narrow('author_exact:\"Daniel Lindsley\"')\n            .facet(\"author\")\n            .facet_counts()\n        )\n        self.assertEqual(counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 3)])\n\n    def test_narrow(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .facet(\"author\")\n            .facet(\"editor\")\n            .narrow('editor_exact:\"Perry White\"')\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 3), (\"Dan Watson\", 2)]\n        )\n        self.assertEqual(counts[\"fields\"][\"editor\"], [(\"Perry White\", 5)])\n\n    def test_date_facet(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        start = datetime.date(2013, 9, 1)\n        end = datetime.date(2013, 9, 30)\n        # Facet by day\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .date_facet(\"pub_date\", start_date=start, end_date=end, gap_by=\"day\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"dates\"][\"pub_date\"],\n            [\n                (datetime.datetime(2013, 9, 1), 2),\n                
(datetime.datetime(2013, 9, 2), 3),\n                (datetime.datetime(2013, 9, 3), 2),\n                (datetime.datetime(2013, 9, 4), 2),\n            ],\n        )\n        # By month\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .date_facet(\"pub_date\", start_date=start, end_date=end, gap_by=\"month\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"dates\"][\"pub_date\"], [(datetime.datetime(2013, 9, 1), 9)]\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch5_tests/test_inputs.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connections, inputs\n\n\nclass Elasticsearch5InputTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.query_obj = connections[\"elasticsearch\"].get_query()\n\n    def test_raw_init(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {})\n        self.assertEqual(raw.post_process, False)\n\n        raw = inputs.Raw(\"hello OR there, :you\", test=\"really\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {\"test\": \"really\"})\n        self.assertEqual(raw.post_process, False)\n\n    def test_raw_prepare(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.prepare(self.query_obj), \"hello OR there, :you\")\n\n    def test_clean_init(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.query_string, \"hello OR there, :you\")\n        self.assertEqual(clean.post_process, True)\n\n    def test_clean_prepare(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.prepare(self.query_obj), \"hello or there, \\\\:you\")\n\n    def test_exact_init(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.query_string, \"hello OR there, :you\")\n        self.assertEqual(exact.post_process, True)\n\n    def test_exact_prepare(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello OR there, :you\"')\n\n        exact = inputs.Exact(\"hello OR there, :you\", clean=True)\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello or there, \\\\:you\"')\n\n    def test_not_init(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        
self.assertEqual(not_it.query_string, \"hello OR there, :you\")\n        self.assertEqual(not_it.post_process, True)\n\n    def test_not_prepare(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        self.assertEqual(not_it.prepare(self.query_obj), \"NOT (hello or there, \\\\:you)\")\n\n    def test_autoquery_init(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.query_string, 'panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.post_process, False)\n\n    def test_autoquery_prepare(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(\n            autoquery.prepare(self.query_obj), 'panic NOT don\\'t \"froody dude\"'\n        )\n\n    def test_altparser_init(self):\n        altparser = inputs.AltParser(\"dismax\")\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"\")\n        self.assertEqual(altparser.kwargs, {})\n        self.assertEqual(altparser.post_process, False)\n\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"douglas adams\")\n        self.assertEqual(altparser.kwargs, {\"mm\": 1, \"qf\": \"author\"})\n        self.assertEqual(altparser.post_process, False)\n\n    def test_altparser_prepare(self):\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(\n            altparser.prepare(self.query_obj),\n            \"\"\"{!dismax mm=1 qf=author v='douglas adams'}\"\"\",\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch5_tests/test_query.py",
    "content": "import datetime\n\nfrom django.contrib.gis.measure import D\nfrom django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.inputs import Exact\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, SearchQuerySet\n\nfrom ..core.models import AnotherMockModel, MockModel\n\n\nclass Elasticsearch5SearchQueryTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.sq = connections[\"elasticsearch\"].get_query()\n\n    def test_build_query_all(self):\n        self.assertEqual(self.sq.build_query(), \"*:*\")\n\n    def test_build_query_single_word(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_build_query_boolean(self):\n        self.sq.add_filter(SQ(content=True))\n        self.assertEqual(self.sq.build_query(), \"(True)\")\n\n    def test_regression_slash_search(self):\n        self.sq.add_filter(SQ(content=\"hello/\"))\n        self.assertEqual(self.sq.build_query(), \"(hello\\\\/)\")\n\n    def test_build_query_datetime(self):\n        self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28)))\n        self.assertEqual(self.sq.build_query(), \"(2009-05-08T11:28:00)\")\n\n    def test_build_query_multiple_words_and(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"((hello) AND (world))\")\n\n    def test_build_query_multiple_words_not(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) AND NOT ((world)))\")\n\n    def test_build_query_multiple_words_or(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"hello\"), use_or=True)\n        self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) OR (hello))\")\n\n    
def test_build_query_multiple_words_mixed(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(content=\"hello\"), use_or=True)\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(\n            self.sq.build_query(), \"(((why) OR (hello)) AND NOT ((world)))\"\n        )\n\n    def test_build_query_phrase(self):\n        self.sq.add_filter(SQ(content=\"hello world\"))\n        self.assertEqual(self.sq.build_query(), \"(hello AND world)\")\n\n        self.sq.add_filter(SQ(content__exact=\"hello world\"))\n        self.assertEqual(\n            self.sq.build_query(), '((hello AND world) AND (\"hello world\"))'\n        )\n\n    def test_build_query_boost(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_boost(\"world\", 5)\n        self.assertEqual(self.sq.build_query(), \"(hello) world^5\")\n\n    def test_build_query_multiple_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=Exact(\"2009-02-10 01:59:00\")))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        self.sq.add_filter(SQ(created__lt=Exact(\"2009-02-12 12:13:00\")))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND pub_date:([* TO \"2009-02-10 01:59:00\"]) AND author:({\"daniel\" TO *}) AND created:({* TO \"2009-02-12 12:13:00\"}) AND title:([\"B\" TO *]) AND id:(\"1\" OR \"2\" OR \"3\") AND rating:([\"3\" TO \"5\"]))',\n        )\n\n    def test_build_query_multiple_filter_types_with_datetimes(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0)))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 
12, 12, 13, 0)))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND pub_date:([* TO \"2009-02-10T01:59:00\"]) AND author:({\"daniel\" TO *}) AND created:({* TO \"2009-02-12T12:13:00\"}) AND title:([\"B\" TO *]) AND id:(\"1\" OR \"2\" OR \"3\") AND rating:([\"3\" TO \"5\"]))',\n        )\n\n    def test_build_query_in_filter_multiple_words(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=[\"A Famous Paper\", \"An Infamous Article\"]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND title:(\"A Famous Paper\" OR \"An Infamous Article\"))',\n        )\n\n    def test_build_query_in_filter_datetime(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))\n        self.assertEqual(\n            self.sq.build_query(), '((why) AND pub_date:(\"2009-07-06T01:56:21\"))'\n        )\n\n    def test_build_query_in_with_set(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in={\"A Famous Paper\", \"An Infamous Article\"}))\n        self.assertTrue(\"((why) AND title:(\" in self.sq.build_query())\n        self.assertTrue('\"A Famous Paper\"' in self.sq.build_query())\n        self.assertTrue('\"An Infamous Article\"' in self.sq.build_query())\n\n    def test_build_query_wildcard_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__startswith=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack*))\")\n\n    def test_build_query_fuzzy_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__fuzzy=\"haystack\"))\n        
self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack~))\")\n\n    def test_clean(self):\n        self.assertEqual(self.sq.clean(\"hello world\"), \"hello world\")\n        self.assertEqual(self.sq.clean(\"hello AND world\"), \"hello and world\")\n        self.assertEqual(\n            self.sq.clean(\n                r'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ \" ~ * ? : \\ / world'\n            ),\n            'hello and or not to \\\\+ \\\\- \\\\&& \\\\|| \\\\! \\\\( \\\\) \\\\{ \\\\} \\\\[ \\\\] \\\\^ \\\\\" \\\\~ \\\\* \\\\? \\\\: \\\\\\\\ \\\\/ world',\n        )\n        self.assertEqual(\n            self.sq.clean(\"so please NOTe i am in a bAND and bORed\"),\n            \"so please NOTe i am in a bAND and bORed\",\n        )\n\n    def test_build_query_with_models(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_model(MockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n        self.sq.add_model(AnotherMockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_set_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n        # Custom class.\n        class IttyBittyResult(object):\n            pass\n\n        self.sq.set_result_class(IttyBittyResult)\n        self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))\n\n        # Reset to default.\n        self.sq.set_result_class(None)\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n    def test_in_filter_values_list(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=[1, 2, 3]))\n        self.assertEqual(self.sq.build_query(), '((why) AND title:(\"1\" OR \"2\" OR \"3\"))')\n\n    def test_narrow_sq(self):\n        sqs = SearchQuerySet(using=\"elasticsearch\").narrow(SQ(foo=\"moof\"))\n        self.assertTrue(isinstance(sqs, 
SearchQuerySet))\n        self.assertEqual(len(sqs.query.narrow_queries), 1)\n        self.assertEqual(sqs.query.narrow_queries.pop(), \"foo:(moof)\")\n\n    def test_build_query_with_dwithin_range(self):\n        from django.contrib.gis.geos import Point\n\n        backend = connections[\"elasticsearch\"].get_backend()\n        search_kwargs = backend.build_search_kwargs(\n            \"where\",\n            dwithin={\n                \"field\": \"location_field\",\n                \"point\": Point(1.2345678, 2.3456789),\n                \"distance\": D(m=500),\n            },\n        )\n        self.assertEqual(\n            search_kwargs[\"query\"][\"bool\"][\"filter\"][\"geo_distance\"],\n            {\n                \"distance\": \"0.500000km\",\n                \"location_field\": {\"lat\": 2.3456789, \"lon\": 1.2345678},\n            },\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch_tests/__init__.py",
    "content": "import unittest\nimport warnings\n\nfrom django.conf import settings\n\nfrom haystack.utils import log as logging\n\nwarnings.simplefilter(\"ignore\", Warning)\n\n\ndef setup():\n    log = logging.getLogger(\"haystack\")\n    try:\n        import elasticsearch\n\n        if not ((1, 0, 0) <= elasticsearch.__version__ < (2, 0, 0)):\n            raise ImportError\n        from elasticsearch import Elasticsearch, ElasticsearchException\n    except ImportError:\n        log.error(\n            \"Skipping ElasticSearch 1 tests: 'elasticsearch>=1.0.0,<2.0.0' not installed.\"\n        )\n        raise unittest.SkipTest(\"'elasticsearch>=1.0.0,<2.0.0' not installed.\")\n\n    es = Elasticsearch(settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"])\n    try:\n        es.info()\n    except ElasticsearchException as e:\n        log.error(\n            \"elasticsearch not running on %r\"\n            % settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"],\n            exc_info=True,\n        )\n        raise unittest.SkipTest(\n            \"elasticsearch not running on %r\"\n            % settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"],\n            e,\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch_tests/test_elasticsearch_backend.py",
    "content": "import datetime\nimport logging as std_logging\nimport operator\nimport pickle\nimport unittest\nfrom contextlib import contextmanager\nfrom decimal import Decimal\n\nimport elasticsearch\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom haystack import connections, indexes, reset_search_queries\nfrom haystack.exceptions import SkipDocument\nfrom haystack.inputs import AutoQuery\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, RelatedSearchQuerySet, SearchQuerySet\nfrom haystack.utils import log as logging\nfrom haystack.utils.loading import UnifiedIndex\n\nfrom ..core.models import AFourthMockModel, AnotherMockModel, ASixthMockModel, MockModel\nfrom ..mocks import MockSearchResult\n\n\ndef clear_elasticsearch_index():\n    # Wipe it clean.\n    raw_es = elasticsearch.Elasticsearch(\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n    )\n    try:\n        raw_es.indices.delete(\n            index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n        )\n        raw_es.indices.refresh()\n    except elasticsearch.TransportError:\n        pass\n\n    # Since we've just completely deleted the index, we'll reset setup_complete so the next access will\n    # correctly define the mappings:\n    connections[\"elasticsearch\"].get_backend().setup_complete = False\n\n\nclass ElasticsearchMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass ElasticsearchMockSearchIndexWithSkipDocument(ElasticsearchMockSearchIndex):\n    def prepare_text(self, obj):\n        if obj.author == \"daniel3\":\n            raise SkipDocument\n        return 
\"Indexed!\\n%s\" % obj.id\n\n\nclass ElasticsearchMockSpellingIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    name = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n    def prepare_text(self, obj):\n        return obj.foo\n\n\nclass ElasticsearchMaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    month = indexes.CharField(indexed=False)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def prepare_month(self, obj):\n        return \"%02d\" % obj.pub_date.month\n\n    def get_model(self):\n        return MockModel\n\n\nclass ElasticsearchMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass ElasticsearchAnotherMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return AnotherMockModel\n\n    def prepare_text(self, obj):\n        return \"You might be searching for the user %s\" % obj.author\n\n\nclass ElasticsearchBoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(\n        document=True,\n        use_template=True,\n        template_name=\"search/indexes/core/mockmodel_template.txt\",\n    )\n    author = indexes.CharField(model_attr=\"author\", weight=2.0)\n    editor = indexes.CharField(model_attr=\"editor\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def 
get_model(self):\n        return AFourthMockModel\n\n    def prepare(self, obj):\n        data = super().prepare(obj)\n\n        if obj.pk == 4:\n            data[\"boost\"] = 5.0\n\n        return data\n\n\nclass ElasticsearchFacetingMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    author = indexes.CharField(model_attr=\"author\", faceted=True)\n    editor = indexes.CharField(model_attr=\"editor\", faceted=True)\n    pub_date = indexes.DateField(model_attr=\"pub_date\", faceted=True)\n    facet_field = indexes.FacetCharField(model_attr=\"author\")\n\n    def prepare_text(self, obj):\n        return \"%s %s\" % (obj.author, obj.editor)\n\n    def get_model(self):\n        return AFourthMockModel\n\n\nclass ElasticsearchRoundTripSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField()\n    is_active = indexes.BooleanField()\n    post_count = indexes.IntegerField()\n    average_rating = indexes.FloatField()\n    price = indexes.DecimalField()\n    pub_date = indexes.DateField()\n    created = indexes.DateTimeField()\n    tags = indexes.MultiValueField()\n    sites = indexes.MultiValueField()\n\n    def get_model(self):\n        return MockModel\n\n    def prepare(self, obj):\n        prepped = super().prepare(obj)\n        prepped.update(\n            {\n                \"text\": \"This is some example text.\",\n                \"name\": \"Mister Pants\",\n                \"is_active\": True,\n                \"post_count\": 25,\n                \"average_rating\": 3.6,\n                \"price\": Decimal(\"24.99\"),\n                \"pub_date\": datetime.date(2009, 11, 21),\n                \"created\": datetime.datetime(2009, 11, 21, 21, 31, 00),\n                \"tags\": [\"staff\", \"outdoor\", \"activist\", \"scientist\"],\n                \"sites\": [3, 5, 1],\n            }\n        )\n        return 
prepped\n\n\nclass ElasticsearchComplexFacetsMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField(faceted=True)\n    is_active = indexes.BooleanField(faceted=True)\n    post_count = indexes.IntegerField()\n    post_count_i = indexes.FacetIntegerField(facet_for=\"post_count\")\n    average_rating = indexes.FloatField(faceted=True)\n    pub_date = indexes.DateField(faceted=True)\n    created = indexes.DateTimeField(faceted=True)\n    sites = indexes.MultiValueField(faceted=True)\n\n    def get_model(self):\n        return MockModel\n\n\nclass ElasticsearchAutocompleteMockModelSearchIndex(\n    indexes.SearchIndex, indexes.Indexable\n):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    text_auto = indexes.EdgeNgramField(model_attr=\"foo\")\n    name_auto = indexes.EdgeNgramField(model_attr=\"author\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass ElasticsearchSpatialSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"name\", document=True)\n    location = indexes.LocationField()\n\n    def prepare_location(self, obj):\n        return \"%s,%s\" % (obj.lat, obj.lon)\n\n    def get_model(self):\n        return ASixthMockModel\n\n\nclass TestSettings(TestCase):\n    def test_kwargs_are_passed_on(self):\n        from haystack.backends.elasticsearch_backend import ElasticsearchSearchBackend\n\n        backend = ElasticsearchSearchBackend(\n            \"alias\",\n            **{\n                \"URL\": settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"],\n                \"INDEX_NAME\": \"testing\",\n                \"KWARGS\": {\"max_retries\": 42},\n            }\n        )\n\n        self.assertEqual(backend.conn.transport.max_retries, 42)\n\n\nclass 
ElasticSearchMockUnifiedIndex(UnifiedIndex):\n\n    spy_args = None\n\n    def get_index(self, model_klass):\n        if self.spy_args is not None:\n            self.spy_args.setdefault(\"get_index\", []).append(model_klass)\n        return super().get_index(model_klass)\n\n    @contextmanager\n    def spy(self):\n        try:\n            self.spy_args = {}\n            yield self.spy_args\n        finally:\n            self.spy_args = None\n\n\nclass ElasticsearchSearchBackendTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = ElasticSearchMockUnifiedIndex()\n        self.smmi = ElasticsearchMockSearchIndex()\n        self.smmidni = ElasticsearchMockSearchIndexWithSkipDocument()\n        self.smtmmi = ElasticsearchMaintainTypeMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        # Force the backend to rebuild the mapping each time.\n        self.sb.existing_mapping = {}\n        self.sb.setup()\n\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n        self.sb.silently_fail = True\n\n    def raw_search(self, query):\n        try:\n            return self.raw_es.search(\n                q=\"*:*\",\n                
index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"],\n            )\n        except elasticsearch.TransportError:\n            return {}\n\n    def test_non_silent(self):\n        bad_sb = connections[\"elasticsearch\"].backend(\n            \"bad\",\n            URL=\"http://omg.wtf.bbq:1000/\",\n            INDEX_NAME=\"whatver\",\n            SILENTLY_FAIL=False,\n            TIMEOUT=1,\n        )\n\n        try:\n            bad_sb.update(self.smmi, self.sample_objs)\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.remove(\"core.mockmodel.1\")\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.clear()\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.search(\"foo\")\n            self.fail()\n        except:\n            pass\n\n    def test_update_no_documents(self):\n        url = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        index_name = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n\n        sb = connections[\"elasticsearch\"].backend(\n            \"elasticsearch\", URL=url, INDEX_NAME=index_name, SILENTLY_FAIL=True\n        )\n        self.assertEqual(sb.update(self.smmi, []), None)\n\n        sb = connections[\"elasticsearch\"].backend(\n            \"elasticsearch\", URL=url, INDEX_NAME=index_name, SILENTLY_FAIL=False\n        )\n        try:\n            sb.update(self.smmi, [])\n            self.fail()\n        except:\n            pass\n\n    def test_update(self):\n        self.sb.update(self.smmi, self.sample_objs)\n\n        # Check what Elasticsearch thinks is there.\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n        self.assertEqual(\n            sorted(\n                [res[\"_source\"] for res in self.raw_search(\"*:*\")[\"hits\"][\"hits\"]],\n                key=lambda x: x[\"id\"],\n            ),\n            [\n 
               {\n                    \"django_id\": \"1\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel1\",\n                    \"name_exact\": \"daniel1\",\n                    \"text\": \"Indexed!\\n1\",\n                    \"pub_date\": \"2009-02-24T00:00:00\",\n                    \"id\": \"core.mockmodel.1\",\n                },\n                {\n                    \"django_id\": \"2\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel2\",\n                    \"name_exact\": \"daniel2\",\n                    \"text\": \"Indexed!\\n2\",\n                    \"pub_date\": \"2009-02-23T00:00:00\",\n                    \"id\": \"core.mockmodel.2\",\n                },\n                {\n                    \"django_id\": \"3\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel3\",\n                    \"name_exact\": \"daniel3\",\n                    \"text\": \"Indexed!\\n3\",\n                    \"pub_date\": \"2009-02-22T00:00:00\",\n                    \"id\": \"core.mockmodel.3\",\n                },\n            ],\n        )\n\n    def test_update_with_SkipDocument_raised(self):\n        self.sb.update(self.smmidni, self.sample_objs)\n\n        # Check what Elasticsearch thinks is there.\n        res = self.raw_search(\"*:*\")[\"hits\"]\n        self.assertEqual(res[\"total\"], 2)\n        self.assertListEqual(\n            sorted([x[\"_source\"][\"id\"] for x in res[\"hits\"]]),\n            [\"core.mockmodel.1\", \"core.mockmodel.2\"],\n        )\n\n    def test_remove(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        self.sb.remove(self.sample_objs[0])\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 2)\n        self.assertEqual(\n            sorted(\n                [res[\"_source\"] 
for res in self.raw_search(\"*:*\")[\"hits\"][\"hits\"]],\n                key=operator.itemgetter(\"django_id\"),\n            ),\n            [\n                {\n                    \"django_id\": \"2\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel2\",\n                    \"name_exact\": \"daniel2\",\n                    \"text\": \"Indexed!\\n2\",\n                    \"pub_date\": \"2009-02-23T00:00:00\",\n                    \"id\": \"core.mockmodel.2\",\n                },\n                {\n                    \"django_id\": \"3\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel3\",\n                    \"name_exact\": \"daniel3\",\n                    \"text\": \"Indexed!\\n3\",\n                    \"pub_date\": \"2009-02-22T00:00:00\",\n                    \"id\": \"core.mockmodel.3\",\n                },\n            ],\n        )\n\n    def test_remove_succeeds_on_404(self):\n        self.sb.silently_fail = False\n        self.sb.remove(\"core.mockmodel.421\")\n\n    def test_clear(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear()\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([AnotherMockModel])\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([MockModel])\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 3)\n\n        self.sb.clear([AnotherMockModel, MockModel])\n     
   self.assertEqual(self.raw_search(\"*:*\").get(\"hits\", {}).get(\"total\", 0), 0)\n\n    def test_results_ask_for_index_per_entry(self):\n        # Test that index class is obtained per result entry, not per every entry field\n        self.sb.update(self.smmi, self.sample_objs)\n        with self.ui.spy() as spy:\n            self.sb.search(\"*:*\", limit_to_registered_models=False)\n            self.assertEqual(len(spy.get(\"get_index\", [])), len(self.sample_objs))\n\n    def test_search(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            set([result.pk for result in self.sb.search(\"*:*\")[\"results\"]]),\n            set([\"2\", \"1\", \"3\"]),\n        )\n\n        self.assertEqual(self.sb.search(\"\", highlight=True), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"Index\", highlight=True)[\"hits\"], 3)\n        self.assertEqual(\n            sorted(\n                [\n                    result.highlighted[0]\n                    for result in self.sb.search(\"Index\", highlight=True)[\"results\"]\n                ]\n            ),\n            [\"<em>Indexed</em>!\\n1\", \"<em>Indexed</em>!\\n2\", \"<em>Indexed</em>!\\n3\"],\n        )\n        self.assertEqual(\n            sorted(\n                [\n                    result.highlighted[0]\n                    for result in self.sb.search(\n                        \"Index\",\n                        highlight={\"pre_tags\": [\"<start>\"], \"post_tags\": [\"</end>\"]},\n                    )[\"results\"]\n                ]\n            ),\n            [\n                \"<start>Indexed</end>!\\n1\",\n                \"<start>Indexed</end>!\\n2\",\n                \"<start>Indexed</end>!\\n3\",\n            
],\n        )\n\n        self.assertEqual(self.sb.search(\"Indx\")[\"hits\"], 0)\n        self.assertEqual(self.sb.search(\"indaxed\")[\"spelling_suggestion\"], \"indexed\")\n        self.assertEqual(\n            self.sb.search(\"arf\", spelling_query=\"indexyd\")[\"spelling_suggestion\"],\n            \"indexed\",\n        )\n\n        self.assertEqual(\n            self.sb.search(\"\", facets={\"name\": {}}), {\"hits\": 0, \"results\": []}\n        )\n        results = self.sb.search(\"Index\", facets={\"name\": {}})\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(\n            results[\"facets\"][\"fields\"][\"name\"],\n            [(\"daniel3\", 1), (\"daniel2\", 1), (\"daniel1\", 1)],\n        )\n\n        self.assertEqual(\n            self.sb.search(\n                \"\",\n                date_facets={\n                    \"pub_date\": {\n                        \"start_date\": datetime.date(2008, 1, 1),\n                        \"end_date\": datetime.date(2009, 4, 1),\n                        \"gap_by\": \"month\",\n                        \"gap_amount\": 1,\n                    }\n                },\n            ),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\n            \"Index\",\n            date_facets={\n                \"pub_date\": {\n                    \"start_date\": datetime.date(2008, 1, 1),\n                    \"end_date\": datetime.date(2009, 4, 1),\n                    \"gap_by\": \"month\",\n                    \"gap_amount\": 1,\n                }\n            },\n        )\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(\n            results[\"facets\"][\"dates\"][\"pub_date\"],\n            [(datetime.datetime(2009, 2, 1, 0, 0), 3)],\n        )\n\n        self.assertEqual(\n            self.sb.search(\"\", query_facets=[(\"name\", \"[* TO e]\")]),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = 
self.sb.search(\"Index\", query_facets=[(\"name\", \"[* TO e]\")])\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(results[\"facets\"][\"queries\"], {\"name\": 3})\n\n        self.assertEqual(\n            self.sb.search(\"\", narrow_queries=set([\"name:daniel1\"])),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\"Index\", narrow_queries=set([\"name:daniel1\"]))\n        self.assertEqual(results[\"hits\"], 1)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                self.sb.search(\"index\", result_class=MockSearchResult)[\"results\"][0],\n                MockSearchResult,\n            )\n        )\n\n        # Check the use of ``limit_to_registered_models``.\n        self.assertEqual(\n            self.sb.search(\"\", limit_to_registered_models=False),\n            {\"hits\": 0, \"results\": []},\n        )\n        self.assertEqual(\n            self.sb.search(\"*:*\", limit_to_registered_models=False)[\"hits\"], 3\n        )\n        self.assertEqual(\n            sorted(\n                [\n                    result.pk\n                    for result in self.sb.search(\n                        \"*:*\", limit_to_registered_models=False\n                    )[\"results\"]\n                ]\n            ),\n            [\"1\", \"2\", \"3\"],\n        )\n\n        # Stow.\n        old_limit_to_registered_models = getattr(\n            settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n        )\n        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            sorted([result.pk for result in self.sb.search(\"*:*\")[\"results\"]]),\n            [\"1\", \"2\", \"3\"],\n        )\n\n        # Restore.\n        
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models\n\n    def test_spatial_search_parameters(self):\n        from django.contrib.gis.geos import Point\n\n        p1 = Point(1.23, 4.56)\n        kwargs = self.sb.build_search_kwargs(\n            \"*:*\",\n            distance_point={\"field\": \"location\", \"point\": p1},\n            sort_by=((\"distance\", \"desc\"),),\n        )\n\n        self.assertIn(\"sort\", kwargs)\n        self.assertEqual(1, len(kwargs[\"sort\"]))\n        geo_d = kwargs[\"sort\"][0][\"_geo_distance\"]\n\n        # ElasticSearch supports the GeoJSON-style lng, lat pairs so unlike Solr the values should be\n        # in the same order as we used to create the Point():\n        # http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-filter.html#_lat_lon_as_array_4\n\n        self.assertDictEqual(\n            geo_d, {\"location\": [1.23, 4.56], \"unit\": \"km\", \"order\": \"desc\"}\n        )\n\n    def test_more_like_this(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 3)\n\n        # A functional MLT example with enough data to work is below. 
Rely on\n        # this to ensure the API is correct enough.\n        self.assertEqual(self.sb.more_like_this(self.sample_objs[0])[\"hits\"], 0)\n        self.assertEqual(\n            [\n                result.pk\n                for result in self.sb.more_like_this(self.sample_objs[0])[\"results\"]\n            ],\n            [],\n        )\n\n    def test_build_schema(self):\n        old_ui = connections[\"elasticsearch\"].get_unified_index()\n\n        (content_field_name, mapping) = self.sb.build_schema(old_ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n        self.assertEqual(len(mapping), 4 + 2)  # +2 management fields\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"name_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n            },\n        )\n\n        ui = UnifiedIndex()\n        ui.build(indexes=[ElasticsearchComplexFacetsMockSearchIndex()])\n        (content_field_name, mapping) = self.sb.build_schema(ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n        self.assertEqual(len(mapping), 15 + 2)  # +2 management fields\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": 
False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"is_active_exact\": {\"type\": \"boolean\"},\n                \"created\": {\"type\": \"date\"},\n                \"post_count\": {\"type\": \"long\"},\n                \"created_exact\": {\"type\": \"date\"},\n                \"sites_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n                \"is_active\": {\"type\": \"boolean\"},\n                \"sites\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"post_count_i\": {\"type\": \"long\"},\n                \"average_rating\": {\"type\": \"float\"},\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date_exact\": {\"type\": \"date\"},\n                \"name_exact\": {\"index\": \"not_analyzed\", \"type\": \"string\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"average_rating_exact\": {\"type\": \"float\"},\n            },\n        )\n\n    def test_verify_type(self):\n        old_ui = connections[\"elasticsearch\"].get_unified_index()\n        ui = UnifiedIndex()\n        smtmmi = ElasticsearchMaintainTypeMockSearchIndex()\n        ui.build(indexes=[smtmmi])\n        connections[\"elasticsearch\"]._index = ui\n        sb = connections[\"elasticsearch\"].get_backend()\n        sb.update(smtmmi, self.sample_objs)\n\n        self.assertEqual(sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            [result.month for result in sb.search(\"*:*\")[\"results\"]], [\"02\", \"02\", \"02\"]\n        )\n        connections[\"elasticsearch\"]._index = old_ui\n\n\nclass CaptureHandler(std_logging.Handler):\n    logs_seen = []\n\n    def emit(self, record):\n        
CaptureHandler.logs_seen.append(record)\n\n\nclass FailedElasticsearchSearchBackendTestCase(TestCase):\n    def setUp(self):\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n        # Stow.\n        # Point the backend at a URL that doesn't exist so we can watch the\n        # sparks fly.\n        self.old_es_url = settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"] = (\n            \"%s/foo/\" % self.old_es_url\n        )\n        self.cap = CaptureHandler()\n        logging.getLogger(\"haystack\").addHandler(self.cap)\n        config = apps.get_app_config(\"haystack\")\n        logging.getLogger(\"haystack\").removeHandler(config.stream)\n\n        # Setup the rest of the bits.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        ui = UnifiedIndex()\n        self.smmi = ElasticsearchMockSearchIndex()\n        ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n    def tearDown(self):\n        # Restore.\n        settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"] = self.old_es_url\n        connections[\"elasticsearch\"]._index = self.old_ui\n        config = apps.get_app_config(\"haystack\")\n        logging.getLogger(\"haystack\").removeHandler(self.cap)\n        logging.getLogger(\"haystack\").addHandler(config.stream)\n\n    @unittest.expectedFailure\n    def test_all_cases(self):\n        # Prior to the addition of the try/except bits, these would all fail miserably.\n        self.assertEqual(len(CaptureHandler.logs_seen), 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        
self.assertEqual(len(CaptureHandler.logs_seen), 1)\n\n        self.sb.remove(self.sample_objs[0])\n        self.assertEqual(len(CaptureHandler.logs_seen), 2)\n\n        self.sb.search(\"search\")\n        self.assertEqual(len(CaptureHandler.logs_seen), 3)\n\n        self.sb.more_like_this(self.sample_objs[0])\n        self.assertEqual(len(CaptureHandler.logs_seen), 4)\n\n        self.sb.clear([MockModel])\n        self.assertEqual(len(CaptureHandler.logs_seen), 5)\n\n        self.sb.clear()\n        self.assertEqual(len(CaptureHandler.logs_seen), 6)\n\n\nclass LiveElasticsearchSearchQueryTestCase(TestCase):\n    fixtures = [\"base_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = ElasticsearchMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sq = connections[\"elasticsearch\"].get_query()\n\n        # Force indexing of the content.\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_log_query(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        with self.settings(DEBUG=False):\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        with self.settings(DEBUG=True):\n            # Redefine it to clear out the cached results.\n            self.sq = connections[\"elasticsearch\"].query(using=\"elasticsearch\")\n            self.sq.add_filter(SQ(name=\"bar\"))\n            len(self.sq.get_results())\n            
self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n\n            # And again, for good measure.\n            self.sq = connections[\"elasticsearch\"].query(\"elasticsearch\")\n            self.sq.add_filter(SQ(name=\"bar\"))\n            self.sq.add_filter(SQ(text=\"moof\"))\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n            self.assertEqual(\n                connections[\"elasticsearch\"].queries[1][\"query_string\"],\n                \"(name:(bar) AND text:(moof))\",\n            )\n\n\nlssqstc_all_loaded = None\n\n\n@override_settings(DEBUG=True)\nclass LiveElasticsearchSearchQuerySetTestCase(TestCase):\n    \"\"\"Used to test actual implementation details of the SearchQuerySet.\"\"\"\n\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = ElasticsearchMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n        self.rsqs = RelatedSearchQuerySet(\"elasticsearch\")\n\n        # Ugly but not constantly reindexing saves us almost 50% runtime.\n        global lssqstc_all_loaded\n\n        if lssqstc_all_loaded is None:\n            lssqstc_all_loaded = True\n\n            # Wipe it clean.\n            clear_elasticsearch_index()\n\n            # Force indexing of the content.\n            self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n       
 connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_load_all(self):\n        sqs = self.sqs.order_by(\"pub_date\").load_all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(len(sqs) > 0)\n        self.assertEqual(\n            sqs[2].object.foo,\n            \"In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.\",\n        )\n\n    def test_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.sqs.all()\n        results = sorted([int(result.pk) for result in list(sqs)])\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 4)\n\n    def test_slice(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            [int(result.pk) for result in results[1:11]],\n            [3, 2, 4, 5, 6, 7, 8, 9, 10, 11],\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all().order_by(\"pub_date\")\n        self.assertEqual(int(results[21].pk), 22)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_values_slicing(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n\n        
# TODO: this would be a good candidate for refactoring into a TestCase subclass shared across backends\n\n        # The values will come back as strings because Hasytack doesn't assume PKs are integers.\n        # We'll prepare this set once since we're going to query the same results in multiple ways:\n        expected_pks = [str(i) for i in [3, 2, 4, 5, 6, 7, 8, 9, 10, 11]]\n\n        results = self.sqs.all().order_by(\"pub_date\").values(\"pk\")\n        self.assertListEqual([i[\"pk\"] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\")\n        self.assertListEqual([i[0] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\", flat=True)\n        self.assertListEqual(results[1:11], expected_pks)\n\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_count(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.sqs.all()\n        self.assertEqual(sqs.count(), 23)\n        self.assertEqual(sqs.count(), 23)\n        self.assertEqual(len(sqs), 23)\n        self.assertEqual(sqs.count(), 23)\n        # Should only execute one query to count the length of the result set.\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_highlight(self):\n        reset_search_queries()\n        results = self.sqs.filter(content=\"index\").highlight()\n        self.assertEqual(results[0].highlighted, [\"<em>Indexed</em>!\\n1\"])\n\n    def test_highlight_options(self):\n        reset_search_queries()\n        results = self.sqs.filter(content=\"index\")\n        results = results.highlight(pre_tags=[\"<i>\"], post_tags=[\"</i>\"])\n        self.assertEqual(results[0].highlighted, [\"<i>Indexed</i>!\\n1\"])\n\n    def test_manual_iter(self):\n        results = self.sqs.all()\n\n        reset_search_queries()\n   
     self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = set([int(result.pk) for result in results._manual_iter()])\n        self.assertEqual(\n            results,\n            set(\n                [\n                    2,\n                    7,\n                    12,\n                    17,\n                    1,\n                    6,\n                    11,\n                    16,\n                    23,\n                    5,\n                    10,\n                    15,\n                    22,\n                    4,\n                    9,\n                    14,\n                    19,\n                    21,\n                    3,\n                    8,\n                    13,\n                    18,\n                    20,\n                ]\n            ),\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_fill_cache(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.sqs.all()\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 10\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n        results._fill_cache(10, 20)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 20\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n\n    def test_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        self.assertEqual(self.sqs._cache_is_full(), False)\n        results = self.sqs.all()\n        fire_the_iterator_and_fill_cache = list(results)\n    
    self.assertEqual(23, len(fire_the_iterator_and_fill_cache))\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 4)\n\n    def test___and__(self):\n        sqs1 = self.sqs.filter(content=\"foo\")\n        sqs2 = self.sqs.filter(content=\"bar\")\n        sqs = sqs1 & sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(sqs.query.build_query(), \"((foo) AND (bar))\")\n\n        # Now for something more complex...\n        sqs3 = self.sqs.exclude(title=\"moof\").filter(\n            SQ(content=\"foo\") | SQ(content=\"baz\")\n        )\n        sqs4 = self.sqs.filter(content=\"bar\")\n        sqs = sqs3 & sqs4\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 3)\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"(NOT (title:(moof)) AND ((foo) OR (baz)) AND (bar))\",\n        )\n\n    def test___or__(self):\n        sqs1 = self.sqs.filter(content=\"foo\")\n        sqs2 = self.sqs.filter(content=\"bar\")\n        sqs = sqs1 | sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(sqs.query.build_query(), \"((foo) OR (bar))\")\n\n        # Now for something more complex...\n        sqs3 = self.sqs.exclude(title=\"moof\").filter(\n            SQ(content=\"foo\") | SQ(content=\"baz\")\n        )\n        sqs4 = self.sqs.filter(content=\"bar\").models(MockModel)\n        sqs = sqs3 | sqs4\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"((NOT (title:(moof)) AND ((foo) OR (baz))) OR (bar))\",\n        )\n\n    def test_auto_query(self):\n        # Ensure bits in exact 
matches get escaped properly as well.\n        # This will break horrifically if escaping isn't working.\n        sqs = self.sqs.auto_query('\"pants:rule\"')\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter), '<SQ: AND content__content=\"pants:rule\">'\n        )\n        self.assertEqual(sqs.query.build_query(), '(\"pants\\\\:rule\")')\n        self.assertEqual(len(sqs), 0)\n\n    def test_query__in(self):\n        self.assertGreater(len(self.sqs), 0)\n        sqs = self.sqs.filter(django_ct=\"core.mockmodel\", django_id__in=[1, 2])\n        self.assertEqual(len(sqs), 2)\n\n    def test_query__in_empty_list(self):\n        \"\"\"Confirm that an empty list avoids a Elasticsearch exception\"\"\"\n        self.assertGreater(len(self.sqs), 0)\n        sqs = self.sqs.filter(id__in=[])\n        self.assertEqual(len(sqs), 0)\n\n    # Regressions\n\n    def test_regression_proper_start_offsets(self):\n        sqs = self.sqs.filter(text=\"index\")\n        self.assertNotEqual(sqs.count(), 0)\n\n        id_counts = {}\n\n        for item in sqs:\n            if item.id in id_counts:\n                id_counts[item.id] += 1\n            else:\n                id_counts[item.id] = 1\n\n        for key, value in id_counts.items():\n            if value > 1:\n                self.fail(\n                    \"Result with id '%s' seen more than once in the results.\" % key\n                )\n\n    def test_regression_raw_search_breaks_slicing(self):\n        sqs = self.sqs.raw_search(\"text:index\")\n        page_1 = [result.pk for result in sqs[0:10]]\n        page_2 = [result.pk for result in sqs[10:20]]\n\n        for pk in page_2:\n            if pk in page_1:\n                self.fail(\n                    \"Result with id '%s' seen more than once in the results.\" % pk\n                )\n\n    # RelatedSearchQuerySet Tests\n\n    def test_related_load_all(self):\n        sqs = 
self.rsqs.order_by(\"pub_date\").load_all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(len(sqs) > 0)\n        self.assertEqual(\n            sqs[2].object.foo,\n            \"In addition, you may specify other fields to be populated along with the document. In this case, we also index the user who authored the document as well as the date the document was published. The variable you assign the SearchField to should directly map to the field your search backend is expecting. You instantiate most search fields with a parameter that points to the attribute of the object to populate that field with.\",\n        )\n\n    def test_related_load_all_queryset(self):\n        sqs = self.rsqs.load_all().order_by(\"pub_date\")\n        self.assertEqual(len(sqs._load_all_querysets), 0)\n\n        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=1))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs._load_all_querysets), 1)\n        self.assertEqual(sorted([obj.object.id for obj in sqs]), list(range(2, 24)))\n\n        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=10))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs._load_all_querysets), 1)\n        self.assertEqual(\n            set([obj.object.id for obj in sqs]),\n            set([12, 17, 11, 16, 23, 15, 22, 14, 19, 21, 13, 18, 20]),\n        )\n        self.assertEqual(set([obj.object.id for obj in sqs[10:20]]), set([21, 22, 23]))\n\n    def test_related_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        sqs = self.rsqs.all()\n        results = set([int(result.pk) for result in list(sqs)])\n        self.assertEqual(\n            results,\n            set(\n                [\n                    2,\n                    7,\n                    12,\n                    17,\n           
         1,\n                    6,\n                    11,\n                    16,\n                    23,\n                    5,\n                    10,\n                    15,\n                    22,\n                    4,\n                    9,\n                    14,\n                    19,\n                    21,\n                    3,\n                    8,\n                    13,\n                    18,\n                    20,\n                ]\n            ),\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 4)\n\n    def test_related_slice(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            [int(result.pk) for result in results[1:11]],\n            [3, 2, 4, 5, 6, 7, 8, 9, 10, 11],\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(int(results[21].pk), 22)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all().order_by(\"pub_date\")\n        self.assertEqual(\n            set([int(result.pk) for result in results[20:30]]), set([21, 22, 23])\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n\n    def test_related_manual_iter(self):\n        results = self.rsqs.all()\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = sorted([int(result.pk) for result in results._manual_iter()])\n        self.assertEqual(results, list(range(1, 24)))\n        
self.assertEqual(len(connections[\"elasticsearch\"].queries), 3)\n\n    def test_related_fill_cache(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results = self.rsqs.all()\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 10\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 1)\n        results._fill_cache(10, 20)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 20\n        )\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 2)\n\n    def test_related_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 0)\n        self.assertEqual(self.rsqs._cache_is_full(), False)\n        results = self.rsqs.all()\n        fire_the_iterator_and_fill_cache = list(results)\n        self.assertEqual(23, len(fire_the_iterator_and_fill_cache))\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"elasticsearch\"].queries), 4)\n\n    def test_quotes_regression(self):\n        sqs = self.sqs.auto_query(\"44°48'40''N 20°28'32''E\")\n        # Should not have empty terms.\n        self.assertEqual(sqs.query.build_query(), \"(44\\xb048'40''N 20\\xb028'32''E)\")\n        # Should not cause Elasticsearch to 500.\n        self.assertEqual(sqs.count(), 0)\n\n        sqs = self.sqs.auto_query(\"blazing\")\n        self.assertEqual(sqs.query.build_query(), \"(blazing)\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"blazing saddles\")\n        self.assertEqual(sqs.query.build_query(), \"(blazing saddles)\")\n        
self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles')\n        self.assertEqual(sqs.query.build_query(), '(\\\\\"blazing saddles)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing \\'saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing \\'saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\")\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"'\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\" ')\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"'\\\"\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\" '\\\\\\\")\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\" mel')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\" mel)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\" mel brooks')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\" mel brooks)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\" brooks')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\" brooks)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel 
\"blazing saddles\" \"brooks')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\" \\\\\"brooks)')\n        self.assertEqual(sqs.count(), 0)\n\n    def test_query_generation(self):\n        sqs = self.sqs.filter(\n            SQ(content=AutoQuery(\"hello world\")) | SQ(title=AutoQuery(\"hello world\"))\n        )\n        self.assertEqual(\n            sqs.query.build_query(), \"((hello world) OR title:(hello world))\"\n        )\n\n    def test_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        sqs = self.sqs.all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n        # Custom class.\n        sqs = self.sqs.result_class(MockSearchResult).all()\n        self.assertTrue(isinstance(sqs[0], MockSearchResult))\n\n        # Reset to default.\n        sqs = self.sqs.result_class(None).all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n\n@override_settings(DEBUG=True)\nclass LiveElasticsearchSpellingTestCase(TestCase):\n    \"\"\"Used to test actual implementation details of the SearchQuerySet.\"\"\"\n\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = ElasticsearchMockSpellingIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Reboot the schema.\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sb.setup()\n\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_spelling(self):\n        self.assertEqual(\n            
self.sqs.auto_query(\"structurd\").spelling_suggestion(), \"structured\"\n        )\n        self.assertEqual(self.sqs.spelling_suggestion(\"structurd\"), \"structured\")\n        self.assertEqual(\n            self.sqs.auto_query(\"srchindex instanc\").spelling_suggestion(),\n            \"searchindex instance\",\n        )\n        self.assertEqual(\n            self.sqs.spelling_suggestion(\"srchindex instanc\"), \"searchindex instance\"\n        )\n\n        sqs = self.sqs.auto_query(\"something completely different\").set_spelling_query(\n            \"structurd\"\n        )\n        self.assertEqual(sqs.spelling_suggestion(), \"structured\")\n\n\nclass LiveElasticsearchMoreLikeThisTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = ElasticsearchMockModelSearchIndex()\n        self.sammi = ElasticsearchAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.sammi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        self.smmi.update(using=\"elasticsearch\")\n        self.sammi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_more_like_this(self):\n        mlt = self.sqs.more_like_this(MockModel.objects.get(pk=1))\n        self.assertEqual(mlt.count(), 4)\n        self.assertEqual(\n            set([result.pk for result in mlt]), set([\"2\", \"6\", \"16\", \"23\"])\n        )\n        self.assertEqual(len([result.pk for result in mlt]), 4)\n\n        alt_mlt = self.sqs.filter(name=\"daniel3\").more_like_this(\n            MockModel.objects.get(pk=2)\n        )\n        
self.assertEqual(alt_mlt.count(), 6)\n        self.assertEqual(\n            set([result.pk for result in alt_mlt]),\n            set([\"2\", \"6\", \"16\", \"23\", \"1\", \"11\"]),\n        )\n        self.assertEqual(len([result.pk for result in alt_mlt]), 6)\n\n        alt_mlt_with_models = self.sqs.models(MockModel).more_like_this(\n            MockModel.objects.get(pk=1)\n        )\n        self.assertEqual(alt_mlt_with_models.count(), 4)\n        self.assertEqual(\n            set([result.pk for result in alt_mlt_with_models]),\n            set([\"2\", \"6\", \"16\", \"23\"]),\n        )\n        self.assertEqual(len([result.pk for result in alt_mlt_with_models]), 4)\n\n        if hasattr(MockModel.objects, \"defer\"):\n            # Make sure MLT works with deferred bits.\n            mi = MockModel.objects.defer(\"foo\").get(pk=1)\n            deferred = self.sqs.models(MockModel).more_like_this(mi)\n            self.assertEqual(deferred.count(), 4)\n            self.assertEqual(\n                set([result.pk for result in deferred]), set([\"2\", \"6\", \"16\", \"23\"])\n            )\n            self.assertEqual(len([result.pk for result in deferred]), 4)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                self.sqs.result_class(MockSearchResult).more_like_this(\n                    MockModel.objects.get(pk=1)\n                )[0],\n                MockSearchResult,\n            )\n        )\n\n\nclass LiveElasticsearchAutocompleteTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = ElasticsearchAutocompleteMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = 
SearchQuerySet(\"elasticsearch\")\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Reboot the schema.\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        self.sb.setup()\n\n        self.smmi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_build_schema(self):\n        self.sb = connections[\"elasticsearch\"].get_backend()\n        content_name, mapping = self.sb.build_schema(self.ui.all_searchfields())\n        self.assertEqual(\n            mapping,\n            {\n                \"django_id\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"django_ct\": {\n                    \"index\": \"not_analyzed\",\n                    \"type\": \"string\",\n                    \"include_in_all\": False,\n                },\n                \"name_auto\": {\"type\": \"string\", \"analyzer\": \"edgengram_analyzer\"},\n                \"text\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"pub_date\": {\"type\": \"date\"},\n                \"name\": {\"type\": \"string\", \"analyzer\": \"snowball\"},\n                \"text_auto\": {\"type\": \"string\", \"analyzer\": \"edgengram_analyzer\"},\n            },\n        )\n\n    def test_autocomplete(self):\n        autocomplete = self.sqs.autocomplete(text_auto=\"mod\")\n        self.assertEqual(autocomplete.count(), 16)\n        self.assertEqual(\n            set([result.pk for result in autocomplete]),\n            set(\n                [\n                    \"1\",\n                    \"12\",\n                    \"6\",\n                    \"14\",\n                    \"7\",\n                    \"4\",\n                    \"23\",\n                    \"17\",\n                    
\"13\",\n                    \"18\",\n                    \"20\",\n                    \"22\",\n                    \"19\",\n                    \"15\",\n                    \"10\",\n                    \"2\",\n                ]\n            ),\n        )\n        self.assertTrue(\"mod\" in autocomplete[0].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[1].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[2].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[3].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[4].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete]), 16)\n\n        # Test multiple words.\n        autocomplete_2 = self.sqs.autocomplete(text_auto=\"your mod\")\n        self.assertEqual(autocomplete_2.count(), 13)\n        self.assertEqual(\n            set([result.pk for result in autocomplete_2]),\n            set(\n                [\n                    \"1\",\n                    \"6\",\n                    \"2\",\n                    \"14\",\n                    \"12\",\n                    \"13\",\n                    \"10\",\n                    \"19\",\n                    \"4\",\n                    \"20\",\n                    \"23\",\n                    \"22\",\n                    \"15\",\n                ]\n            ),\n        )\n        self.assertTrue(\"your\" in autocomplete_2[0].text.lower())\n        self.assertTrue(\"mod\" in autocomplete_2[0].text.lower())\n        self.assertTrue(\"your\" in autocomplete_2[1].text.lower())\n        self.assertTrue(\"mod\" in autocomplete_2[1].text.lower())\n        self.assertTrue(\"your\" in autocomplete_2[2].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete_2]), 13)\n\n        # Test multiple fields.\n        autocomplete_3 = self.sqs.autocomplete(text_auto=\"Django\", name_auto=\"dan\")\n        self.assertEqual(autocomplete_3.count(), 4)\n        self.assertEqual(\n           
 set([result.pk for result in autocomplete_3]), set([\"12\", \"1\", \"22\", \"14\"])\n        )\n        self.assertEqual(len([result.pk for result in autocomplete_3]), 4)\n\n        # Test numbers in phrases\n        autocomplete_4 = self.sqs.autocomplete(text_auto=\"Jen 867\")\n        self.assertEqual(autocomplete_4.count(), 1)\n        self.assertEqual(set([result.pk for result in autocomplete_4]), set([\"20\"]))\n\n        # Test numbers alone\n        autocomplete_4 = self.sqs.autocomplete(text_auto=\"867\")\n        self.assertEqual(autocomplete_4.count(), 1)\n        self.assertEqual(set([result.pk for result in autocomplete_4]), set([\"20\"]))\n\n\nclass LiveElasticsearchRoundTripTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.srtsi = ElasticsearchRoundTripSearchIndex()\n        self.ui.build(indexes=[self.srtsi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        # Fake indexing.\n        mock = MockModel()\n        mock.id = 1\n        self.sb.update(self.srtsi, [mock])\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_round_trip(self):\n        results = self.sqs.filter(id=\"core.mockmodel.1\")\n\n        # Sanity check.\n        self.assertEqual(results.count(), 1)\n\n        # Check the individual fields.\n        result = results[0]\n        self.assertEqual(result.id, \"core.mockmodel.1\")\n        self.assertEqual(result.text, \"This is some example text.\")\n        self.assertEqual(result.name, \"Mister Pants\")\n        self.assertEqual(result.is_active, True)\n        
self.assertEqual(result.post_count, 25)\n        self.assertEqual(result.average_rating, 3.6)\n        self.assertEqual(result.price, \"24.99\")\n        self.assertEqual(result.pub_date, datetime.date(2009, 11, 21))\n        self.assertEqual(result.created, datetime.datetime(2009, 11, 21, 21, 31, 00))\n        self.assertEqual(result.tags, [\"staff\", \"outdoor\", \"activist\", \"scientist\"])\n        self.assertEqual(result.sites, [3, 5, 1])\n\n\nclass LiveElasticsearchPickleTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = ElasticsearchMockModelSearchIndex()\n        self.sammi = ElasticsearchAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.sammi])\n        connections[\"elasticsearch\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"elasticsearch\")\n\n        self.smmi.update(using=\"elasticsearch\")\n        self.sammi.update(using=\"elasticsearch\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_pickling(self):\n        results = self.sqs.all()\n\n        for res in results:\n            # Make sure the cache is full.\n            pass\n\n        in_a_pickle = pickle.dumps(results)\n        like_a_cuke = pickle.loads(in_a_pickle)\n        self.assertEqual(len(like_a_cuke), len(results))\n        self.assertEqual(like_a_cuke[0].id, results[0].id)\n\n\nclass ElasticsearchBoostBackendTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n        
clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = ElasticsearchBoostMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        self.sample_objs = []\n\n        for i in range(1, 5):\n            mock = AFourthMockModel()\n            mock.id = i\n\n            if i % 2:\n                mock.author = \"daniel\"\n                mock.editor = \"david\"\n            else:\n                mock.author = \"david\"\n                mock.editor = \"daniel\"\n\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def raw_search(self, query):\n        return self.raw_es.search(\n            q=\"*:*\", index=settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"INDEX_NAME\"]\n        )\n\n    def test_boost(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_search(\"*:*\")[\"hits\"][\"total\"], 4)\n\n        results = SearchQuerySet(using=\"elasticsearch\").filter(\n            SQ(author=\"daniel\") | SQ(editor=\"daniel\")\n        )\n\n        self.assertEqual(\n            set([result.id for result in results]),\n            set(\n                [\n                    \"core.afourthmockmodel.4\",\n                    \"core.afourthmockmodel.3\",\n                    \"core.afourthmockmodel.1\",\n                    \"core.afourthmockmodel.2\",\n                ]\n            ),\n        )\n\n    def test__to_python(self):\n        self.assertEqual(self.sb._to_python(\"abc\"), \"abc\")\n        self.assertEqual(self.sb._to_python(\"1\"), 1)\n        
self.assertEqual(self.sb._to_python(\"2653\"), 2653)\n        self.assertEqual(self.sb._to_python(\"25.5\"), 25.5)\n        self.assertEqual(self.sb._to_python(\"[1, 2, 3]\"), [1, 2, 3])\n        self.assertEqual(\n            self.sb._to_python('{\"a\": 1, \"b\": 2, \"c\": 3}'), {\"a\": 1, \"c\": 3, \"b\": 2}\n        )\n        self.assertEqual(\n            self.sb._to_python(\"2009-05-09T16:14:00\"),\n            datetime.datetime(2009, 5, 9, 16, 14),\n        )\n        self.assertEqual(\n            self.sb._to_python(\"2009-05-09T00:00:00\"),\n            datetime.datetime(2009, 5, 9, 0, 0),\n        )\n        self.assertEqual(self.sb._to_python(None), None)\n\n\nclass RecreateIndexTestCase(TestCase):\n    def setUp(self):\n        self.raw_es = elasticsearch.Elasticsearch(\n            settings.HAYSTACK_CONNECTIONS[\"elasticsearch\"][\"URL\"]\n        )\n\n    def test_recreate_index(self):\n        clear_elasticsearch_index()\n\n        sb = connections[\"elasticsearch\"].get_backend()\n        sb.silently_fail = True\n        sb.setup()\n\n        original_mapping = self.raw_es.indices.get_mapping(index=sb.index_name)\n\n        sb.clear()\n        sb.setup()\n\n        try:\n            updated_mapping = self.raw_es.indices.get_mapping(sb.index_name)\n        except elasticsearch.NotFoundError:\n            self.fail(\"There is no mapping after recreating the index\")\n\n        self.assertEqual(\n            original_mapping,\n            updated_mapping,\n            \"Mapping after recreating the index differs from the original one\",\n        )\n\n\nclass ElasticsearchFacetingTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_elasticsearch_index()\n\n        # Stow.\n        self.old_ui = connections[\"elasticsearch\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = ElasticsearchFacetingMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        
connections[\"elasticsearch\"]._index = self.ui\n        self.sb = connections[\"elasticsearch\"].get_backend()\n\n        # Force the backend to rebuild the mapping each time.\n        self.sb.existing_mapping = {}\n        self.sb.setup()\n\n        self.sample_objs = []\n\n        for i in range(1, 10):\n            mock = AFourthMockModel()\n            mock.id = i\n            if i > 5:\n                mock.editor = \"George Taylor\"\n            else:\n                mock.editor = \"Perry White\"\n            if i % 2:\n                mock.author = \"Daniel Lindsley\"\n            else:\n                mock.author = \"Dan Watson\"\n            mock.pub_date = datetime.date(2013, 9, (i % 4) + 1)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"elasticsearch\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_facet(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .facet(\"author\")\n            .facet(\"editor\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 5), (\"Dan Watson\", 4)]\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"editor\"], [(\"Perry White\", 5), (\"George Taylor\", 4)]\n        )\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .filter(content=\"white\")\n            .facet(\"facet_field\", order=\"reverse_count\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"facet_field\"], [(\"Dan Watson\", 2), (\"Daniel Lindsley\", 3)]\n        )\n\n    def test_multiple_narrow(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .narrow('editor_exact:\"Perry White\"')\n            .narrow('author_exact:\"Daniel 
Lindsley\"')\n            .facet(\"author\")\n            .facet_counts()\n        )\n        self.assertEqual(counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 3)])\n\n    def test_narrow(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .facet(\"author\")\n            .facet(\"editor\")\n            .narrow('editor_exact:\"Perry White\"')\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"fields\"][\"author\"], [(\"Daniel Lindsley\", 3), (\"Dan Watson\", 2)]\n        )\n        self.assertEqual(counts[\"fields\"][\"editor\"], [(\"Perry White\", 5)])\n\n    def test_date_facet(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        start = datetime.date(2013, 9, 1)\n        end = datetime.date(2013, 9, 30)\n        # Facet by day\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .date_facet(\"pub_date\", start_date=start, end_date=end, gap_by=\"day\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"dates\"][\"pub_date\"],\n            [\n                (datetime.datetime(2013, 9, 1), 2),\n                (datetime.datetime(2013, 9, 2), 3),\n                (datetime.datetime(2013, 9, 3), 2),\n                (datetime.datetime(2013, 9, 4), 2),\n            ],\n        )\n        # By month\n        counts = (\n            SearchQuerySet(\"elasticsearch\")\n            .date_facet(\"pub_date\", start_date=start, end_date=end, gap_by=\"month\")\n            .facet_counts()\n        )\n        self.assertEqual(\n            counts[\"dates\"][\"pub_date\"], [(datetime.datetime(2013, 9, 1), 9)]\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch_tests/test_elasticsearch_query.py",
    "content": "import datetime\n\nimport elasticsearch\nfrom django.contrib.gis.measure import D\nfrom django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.inputs import Exact\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, SearchQuerySet\n\nfrom ..core.models import AnotherMockModel, MockModel\n\n\nclass ElasticsearchSearchQueryTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        super().setUp()\n        self.sq = connections[\"elasticsearch\"].get_query()\n\n    def test_build_query_all(self):\n        self.assertEqual(self.sq.build_query(), \"*:*\")\n\n    def test_build_query_single_word(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_build_query_boolean(self):\n        self.sq.add_filter(SQ(content=True))\n        self.assertEqual(self.sq.build_query(), \"(True)\")\n\n    def test_regression_slash_search(self):\n        self.sq.add_filter(SQ(content=\"hello/\"))\n        self.assertEqual(self.sq.build_query(), \"(hello\\\\/)\")\n\n    def test_build_query_datetime(self):\n        self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28)))\n        self.assertEqual(self.sq.build_query(), \"(2009-05-08T11:28:00)\")\n\n    def test_build_query_multiple_words_and(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"((hello) AND (world))\")\n\n    def test_build_query_multiple_words_not(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) AND NOT ((world)))\")\n\n    def test_build_query_multiple_words_or(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"hello\"), use_or=True)\n        
self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) OR (hello))\")\n\n    def test_build_query_multiple_words_mixed(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(content=\"hello\"), use_or=True)\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(\n            self.sq.build_query(), \"(((why) OR (hello)) AND NOT ((world)))\"\n        )\n\n    def test_build_query_phrase(self):\n        self.sq.add_filter(SQ(content=\"hello world\"))\n        self.assertEqual(self.sq.build_query(), \"(hello AND world)\")\n\n        self.sq.add_filter(SQ(content__exact=\"hello world\"))\n        self.assertEqual(\n            self.sq.build_query(), '((hello AND world) AND (\"hello world\"))'\n        )\n\n    def test_build_query_boost(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_boost(\"world\", 5)\n        self.assertEqual(self.sq.build_query(), \"(hello) world^5\")\n\n    def test_build_query_multiple_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=Exact(\"2009-02-10 01:59:00\")))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        self.sq.add_filter(SQ(created__lt=Exact(\"2009-02-12 12:13:00\")))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND pub_date:([* TO \"2009-02-10 01:59:00\"]) AND author:({\"daniel\" TO *}) AND created:({* TO \"2009-02-12 12:13:00\"}) AND title:([\"B\" TO *]) AND id:(\"1\" OR \"2\" OR \"3\") AND rating:([\"3\" TO \"5\"]))',\n        )\n\n    def test_build_query_multiple_filter_types_with_datetimes(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0)))\n        
self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13, 0)))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND pub_date:([* TO \"2009-02-10T01:59:00\"]) AND author:({\"daniel\" TO *}) AND created:({* TO \"2009-02-12T12:13:00\"}) AND title:([\"B\" TO *]) AND id:(\"1\" OR \"2\" OR \"3\") AND rating:([\"3\" TO \"5\"]))',\n        )\n\n    def test_build_query_in_filter_multiple_words(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=[\"A Famous Paper\", \"An Infamous Article\"]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND title:(\"A Famous Paper\" OR \"An Infamous Article\"))',\n        )\n\n    def test_build_query_in_filter_datetime(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))\n        self.assertEqual(\n            self.sq.build_query(), '((why) AND pub_date:(\"2009-07-06T01:56:21\"))'\n        )\n\n    def test_build_query_in_with_set(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=set([\"A Famous Paper\", \"An Infamous Article\"])))\n        self.assertTrue(\"((why) AND title:(\" in self.sq.build_query())\n        self.assertTrue('\"A Famous Paper\"' in self.sq.build_query())\n        self.assertTrue('\"An Infamous Article\"' in self.sq.build_query())\n\n    def test_build_query_wildcard_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__startswith=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack*))\")\n\n    def test_build_query_fuzzy_filter_types(self):\n        
self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__fuzzy=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack~))\")\n\n    def test_build_query_with_contains(self):\n        self.sq.add_filter(SQ(content=\"circular\"))\n        self.sq.add_filter(SQ(title__contains=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((circular) AND title:(*haystack*))\")\n\n    def test_build_query_with_endswith(self):\n        self.sq.add_filter(SQ(content=\"circular\"))\n        self.sq.add_filter(SQ(title__endswith=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((circular) AND title:(*haystack))\")\n\n    def test_clean(self):\n        self.assertEqual(self.sq.clean(\"hello world\"), \"hello world\")\n        self.assertEqual(self.sq.clean(\"hello AND world\"), \"hello and world\")\n        self.assertEqual(\n            self.sq.clean(\n                r'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ \" ~ * ? : \\ / world'\n            ),\n            'hello and or not to \\\\+ \\\\- \\\\&& \\\\|| \\\\! \\\\( \\\\) \\\\{ \\\\} \\\\[ \\\\] \\\\^ \\\\\" \\\\~ \\\\* \\\\? 
\\\\: \\\\\\\\ \\\\/ world',\n        )\n        self.assertEqual(\n            self.sq.clean(\"so please NOTe i am in a bAND and bORed\"),\n            \"so please NOTe i am in a bAND and bORed\",\n        )\n\n    def test_build_query_with_models(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_model(MockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n        self.sq.add_model(AnotherMockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_set_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n        # Custom class.\n        class IttyBittyResult(object):\n            pass\n\n        self.sq.set_result_class(IttyBittyResult)\n        self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))\n\n        # Reset to default.\n        self.sq.set_result_class(None)\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n    def test_in_filter_values_list(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=MockModel.objects.values_list(\"id\", flat=True)))\n        self.assertEqual(self.sq.build_query(), '((why) AND title:(\"1\" OR \"2\" OR \"3\"))')\n\n    def test_narrow_sq(self):\n        sqs = SearchQuerySet(using=\"elasticsearch\").narrow(SQ(foo=\"moof\"))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.narrow_queries), 1)\n        self.assertEqual(sqs.query.narrow_queries.pop(), \"foo:(moof)\")\n\n    def test_query__in(self):\n        sqs = SearchQuerySet(using=\"elasticsearch\").filter(id__in=[1, 2, 3])\n        self.assertEqual(sqs.query.build_query(), 'id:(\"1\" OR \"2\" OR \"3\")')\n\n    def test_query__in_empty_list(self):\n        \"\"\"Confirm that an empty list avoids a Elasticsearch exception\"\"\"\n        sqs = 
SearchQuerySet(using=\"elasticsearch\").filter(id__in=[])\n        self.assertEqual(sqs.query.build_query(), \"id:(!*:*)\")\n\n\nclass ElasticsearchSearchQuerySpatialBeforeReleaseTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.backend = connections[\"elasticsearch\"].get_backend()\n        self._elasticsearch_version = elasticsearch.VERSION\n        elasticsearch.VERSION = (0, 9, 9)\n\n    def tearDown(self):\n        elasticsearch.VERSION = self._elasticsearch_version\n\n    def test_build_query_with_dwithin_range(self):\n        \"\"\"\n        Test build_search_kwargs with dwithin range for Elasticsearch versions < 1.0.0\n        \"\"\"\n        from django.contrib.gis.geos import Point\n\n        search_kwargs = self.backend.build_search_kwargs(\n            \"where\",\n            dwithin={\n                \"field\": \"location_field\",\n                \"point\": Point(1.2345678, 2.3456789),\n                \"distance\": D(m=500),\n            },\n        )\n        self.assertEqual(\n            search_kwargs[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"][1][\n                \"geo_distance\"\n            ],\n            {\"distance\": 0.5, \"location_field\": {\"lat\": 2.3456789, \"lon\": 1.2345678}},\n        )\n\n\nclass ElasticsearchSearchQuerySpatialAfterReleaseTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.backend = connections[\"elasticsearch\"].get_backend()\n        self._elasticsearch_version = elasticsearch.VERSION\n        elasticsearch.VERSION = (1, 0, 0)\n\n    def tearDown(self):\n        elasticsearch.VERSION = self._elasticsearch_version\n\n    def test_build_query_with_dwithin_range(self):\n        \"\"\"\n        Test build_search_kwargs with dwithin range for Elasticsearch versions >= 1.0.0\n        \"\"\"\n        from django.contrib.gis.geos import Point\n\n        search_kwargs = self.backend.build_search_kwargs(\n            \"where\",\n            
dwithin={\n                \"field\": \"location_field\",\n                \"point\": Point(1.2345678, 2.3456789),\n                \"distance\": D(m=500),\n            },\n        )\n        self.assertEqual(\n            search_kwargs[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"][1][\n                \"geo_distance\"\n            ],\n            {\n                \"distance\": \"0.500000km\",\n                \"location_field\": {\"lat\": 2.3456789, \"lon\": 1.2345678},\n            },\n        )\n"
  },
  {
    "path": "test_haystack/elasticsearch_tests/test_inputs.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connections, inputs\n\n\nclass ElasticsearchInputTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.query_obj = connections[\"elasticsearch\"].get_query()\n\n    def test_raw_init(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {})\n        self.assertEqual(raw.post_process, False)\n\n        raw = inputs.Raw(\"hello OR there, :you\", test=\"really\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {\"test\": \"really\"})\n        self.assertEqual(raw.post_process, False)\n\n    def test_raw_prepare(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.prepare(self.query_obj), \"hello OR there, :you\")\n\n    def test_clean_init(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.query_string, \"hello OR there, :you\")\n        self.assertEqual(clean.post_process, True)\n\n    def test_clean_prepare(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.prepare(self.query_obj), \"hello or there, \\\\:you\")\n\n    def test_exact_init(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.query_string, \"hello OR there, :you\")\n        self.assertEqual(exact.post_process, True)\n\n    def test_exact_prepare(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello OR there, :you\"')\n\n        exact = inputs.Exact(\"hello OR there, :you\", clean=True)\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello or there, \\\\:you\"')\n\n    def test_not_init(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        
self.assertEqual(not_it.query_string, \"hello OR there, :you\")\n        self.assertEqual(not_it.post_process, True)\n\n    def test_not_prepare(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        self.assertEqual(not_it.prepare(self.query_obj), \"NOT (hello or there, \\\\:you)\")\n\n    def test_autoquery_init(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.query_string, 'panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.post_process, False)\n\n    def test_autoquery_prepare(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(\n            autoquery.prepare(self.query_obj), 'panic NOT don\\'t \"froody dude\"'\n        )\n\n    def test_altparser_init(self):\n        altparser = inputs.AltParser(\"dismax\")\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"\")\n        self.assertEqual(altparser.kwargs, {})\n        self.assertEqual(altparser.post_process, False)\n\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"douglas adams\")\n        self.assertEqual(altparser.kwargs, {\"mm\": 1, \"qf\": \"author\"})\n        self.assertEqual(altparser.post_process, False)\n\n    def test_altparser_prepare(self):\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(\n            altparser.prepare(self.query_obj),\n            \"\"\"{!dismax mm=1 qf=author v='douglas adams'}\"\"\",\n        )\n"
  },
  {
    "path": "test_haystack/mocks.py",
    "content": "from django.apps import apps\n\nfrom haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, log_query\nfrom haystack.models import SearchResult\nfrom haystack.routers import BaseRouter\nfrom haystack.utils import get_identifier\n\n\nclass MockMasterSlaveRouter(BaseRouter):\n    def for_read(self, **hints):\n        return \"slave\"\n\n    def for_write(self, **hints):\n        return \"master\"\n\n\nclass MockPassthroughRouter(BaseRouter):\n    def for_read(self, **hints):\n        if hints.get(\"pass_through\") is False:\n            return \"pass\"\n\n        return None\n\n    def for_write(self, **hints):\n        if hints.get(\"pass_through\") is False:\n            return \"pass\"\n\n        return None\n\n\nclass MockMultiRouter(BaseRouter):\n    def for_write(self, **hints):\n        return [\"multi1\", \"multi2\"]\n\n\nclass MockSearchResult(SearchResult):\n    def __init__(self, app_label, model_name, pk, score, **kwargs):\n        super().__init__(app_label, model_name, pk, score, **kwargs)\n        self._model = apps.get_model(\"core\", model_name)\n\n\nMOCK_SEARCH_RESULTS = [\n    MockSearchResult(\"core\", \"MockModel\", i, 1 - (i / 100.0)) for i in range(1, 100)\n]\nMOCK_INDEX_DATA = {}\n\n\nclass MockSearchBackend(BaseSearchBackend):\n    model_name = \"mockmodel\"\n\n    def update(self, index, iterable, commit=True):\n        global MOCK_INDEX_DATA\n        for obj in iterable:\n            doc = index.full_prepare(obj)\n            MOCK_INDEX_DATA[doc[\"id\"]] = doc\n\n    def remove(self, obj, commit=True):\n        global MOCK_INDEX_DATA\n        if commit:\n            del MOCK_INDEX_DATA[get_identifier(obj)]\n\n    def clear(self, models=None, commit=True):\n        global MOCK_INDEX_DATA\n        MOCK_INDEX_DATA = {}\n\n    @log_query\n    def search(self, query_string, **kwargs):\n        from haystack import connections\n\n        global MOCK_INDEX_DATA\n        results = []\n        hits = 
len(MOCK_INDEX_DATA)\n        indexed_models = connections[\"default\"].get_unified_index().get_indexed_models()\n\n        def junk_sort(key):\n            app, model, pk = key.split(\".\")\n\n            if pk.isdigit():\n                return int(pk)\n            else:\n                return ord(pk[0])\n\n        sliced = sorted(MOCK_INDEX_DATA, key=junk_sort)\n\n        for i, result in enumerate(sliced):\n            app_label, model_name, pk = result.split(\".\")\n            model = apps.get_model(app_label, model_name)\n\n            if model:\n                if model in indexed_models:\n                    results.append(\n                        MockSearchResult(app_label, model_name, pk, 1 - (i / 100.0))\n                    )\n                else:\n                    hits -= 1\n            else:\n                hits -= 1\n\n        return {\n            \"results\": results[kwargs.get(\"start_offset\") : kwargs.get(\"end_offset\")],\n            \"hits\": hits,\n        }\n\n    def more_like_this(\n        self, model_instance, additional_query_string=None, result_class=None\n    ):\n        return self.search(query_string=\"*\")\n\n\nclass CharPKMockSearchBackend(MockSearchBackend):\n    model_name = \"charpkmockmodel\"\n    mock_search_results = [\n        MockSearchResult(\"core\", \"CharPKMockModel\", \"sometext\", 0.5),\n        MockSearchResult(\"core\", \"CharPKMockModel\", \"1234\", 0.3),\n    ]\n\n\nclass UUIDMockSearchBackend(MockSearchBackend):\n    model_name = \"uuidmockmodel\"\n    mock_search_results = [\n        MockSearchResult(\n            \"core\", \"UUIDMockModel\", \"53554c58-7051-4350-bcc9-dad75eb248a9\", 0.5\n        ),\n        MockSearchResult(\n            \"core\", \"UUIDMockModel\", \"77554c58-7051-4350-bcc9-dad75eb24888\", 0.5\n        ),\n    ]\n\n\nclass ReadQuerySetMockSearchBackend(MockSearchBackend):\n    model_name = \"afifthmockmodel\"\n    mock_search_results = [\n        MockSearchResult(\"core\", 
\"afifthmockmodel\", 1, 2),\n        MockSearchResult(\"core\", \"afifthmockmodel\", 2, 2),\n    ]\n\n\nclass MixedMockSearchBackend(MockSearchBackend):\n    @log_query\n    def search(self, query_string, **kwargs):\n        if kwargs.get(\"end_offset\") and kwargs[\"end_offset\"] > 30:\n            kwargs[\"end_offset\"] = 30\n\n        result_info = super().search(query_string, **kwargs)\n        result_info[\"hits\"] = 30\n\n        # Remove search results from other models.\n        temp_results = []\n\n        for result in result_info[\"results\"]:\n            if not int(result.pk) in (9, 13, 14):\n                # MockSearchResult('core', 'AnotherMockModel', 9, .1)\n                # MockSearchResult('core', 'AnotherMockModel', 13, .1)\n                # MockSearchResult('core', 'NonexistentMockModel', 14, .1)\n                temp_results.append(result)\n\n        result_info[\"results\"] = temp_results\n\n        return result_info\n\n\nclass MockSearchQuery(BaseSearchQuery):\n    def build_query(self):\n        return \"\"\n\n    def clean(self, query_fragment):\n        return query_fragment\n\n    # def run_mlt(self):\n    #     # To simulate the chunking behavior of a regular search, return a slice\n    #     # of our results using start/end offset.\n    #     final_query = self.build_query()\n    #     results = self.backend.more_like_this(self._mlt_instance, final_query)\n    #     import pdb; pdb.set_trace()\n    #     self._results = results['results'][self.start_offset:self.end_offset]\n    #     self._hit_count = results['hits']\n\n\nclass MockEngine(BaseEngine):\n    backend = MockSearchBackend\n    query = MockSearchQuery\n"
  },
  {
    "path": "test_haystack/multipleindex/__init__.py",
    "content": "from django.apps import apps\n\nimport haystack\nfrom haystack.signals import RealtimeSignalProcessor\n\nfrom ..utils import check_solr\n\n_old_sp = None\n\n\ndef setup():\n    check_solr()\n    global _old_sp\n    config = apps.get_app_config(\"haystack\")\n    _old_sp = config.signal_processor\n    config.signal_processor = RealtimeSignalProcessor(\n        haystack.connections, haystack.connection_router\n    )\n\n\ndef teardown():\n    config = apps.get_app_config(\"haystack\")\n    config.signal_processor.teardown()\n    config.signal_processor = _old_sp\n"
  },
  {
    "path": "test_haystack/multipleindex/models.py",
    "content": "from django.db import models\n\n\nclass Foo(models.Model):\n    title = models.CharField(max_length=255)\n    body = models.TextField()\n\n    def __str__(self):\n        return self.title\n\n\nclass Bar(models.Model):\n    author = models.CharField(max_length=255)\n    content = models.TextField()\n\n    def __str__(self):\n        return self.author\n"
  },
  {
    "path": "test_haystack/multipleindex/routers.py",
    "content": "from haystack.routers import BaseRouter\n\n\nclass MultipleIndexRouter(BaseRouter):\n    def for_write(self, instance=None, **hints):\n        if instance and instance._meta.app_label == \"multipleindex\":\n            return \"solr\"\n"
  },
  {
    "path": "test_haystack/multipleindex/search_indexes.py",
    "content": "from haystack import indexes\nfrom haystack.indexes import Indexable, SearchIndex\n\nfrom .models import Bar, Foo\n\n\n# To test additional ignores...\nclass BaseIndex(indexes.SearchIndex):\n    text = indexes.CharField(document=True, model_attr=\"body\")\n\n    def get_model(self):\n        return Foo\n\n\nclass FooIndex(BaseIndex, indexes.Indexable):\n    def index_queryset(self, using=None):\n        qs = super().index_queryset(using=using)\n        if using == \"filtered_whoosh\":\n            qs = qs.filter(body__contains=\"1\")\n        return qs\n\n\n# Import the old way & make sure things don't explode.\n\n\nclass BarIndex(SearchIndex, Indexable):\n    text = indexes.CharField(document=True)\n\n    def get_model(self):\n        return Bar\n\n    def prepare_text(self, obj):\n        return \"%s\\n%s\" % (obj.author, obj.content)\n"
  },
  {
    "path": "test_haystack/multipleindex/tests.py",
    "content": "from django.db import models\n\nfrom haystack import connections\nfrom haystack.exceptions import NotHandled\nfrom haystack.query import SearchQuerySet\nfrom haystack.signals import BaseSignalProcessor\n\nfrom ..whoosh_tests.testcases import WhooshTestCase\nfrom .models import Bar, Foo\nfrom .search_indexes import BarIndex, FooIndex\n\n\nclass MultipleIndexTestCase(WhooshTestCase):\n    def setUp(self):\n        super().setUp()\n\n        self.ui = connections[\"solr\"].get_unified_index()\n        self.fi = self.ui.get_index(Foo)\n        self.bi = self.ui.get_index(Bar)\n        self.solr_backend = connections[\"solr\"].get_backend()\n        self.whoosh_backend = connections[\"whoosh\"].get_backend()\n        self.filtered_whoosh_backend = connections[\"filtered_whoosh\"].get_backend()\n\n        Foo.objects.bulk_create(\n            [\n                Foo(title=\"Haystack test\", body=\"foo 1\"),\n                Foo(title=\"Another Haystack test\", body=\"foo 2\"),\n            ]\n        )\n\n        Bar.objects.bulk_create(\n            [\n                Bar(author=\"Haystack test\", content=\"bar 1\"),\n                Bar(author=\"Another Haystack test\", content=\"bar 2\"),\n                Bar(author=\"Yet another Haystack test\", content=\"bar 3\"),\n            ]\n        )\n\n        self.fi.reindex(using=\"solr\")\n        self.fi.reindex(using=\"whoosh\")\n        self.bi.reindex(using=\"solr\")\n\n    def tearDown(self):\n        self.fi.clear(using=\"solr\")\n        self.bi.clear(using=\"solr\")\n        super().tearDown()\n\n    def test_index_update_object_using(self):\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n\n        foo_3 = Foo.objects.create(title=\"Whee another Haystack test\", body=\"foo 3\")\n\n        self.fi.update_object(foo_3, using=\"solr\")\n        
results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 3)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n\n        self.fi.update_object(foo_3, using=\"whoosh\")\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 3)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 3)\n\n    def test_index_remove_object_using(self):\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n\n        foo_1 = Foo.objects.get(pk=1)\n\n        self.fi.remove_object(foo_1, using=\"solr\")\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 1)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n\n        self.fi.remove_object(foo_1, using=\"whoosh\")\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 1)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 1)\n\n    def test_index_clear_using(self):\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n\n        self.fi.clear(using=\"solr\")\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 0)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n\n        self.fi.clear(using=\"whoosh\")\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 0)\n        results = self.whoosh_backend.search(\"foo\")\n        
self.assertEqual(results[\"hits\"], 0)\n\n    def test_index_update_using(self):\n        self.fi.clear(using=\"solr\")\n        self.fi.clear(using=\"whoosh\")\n        self.bi.clear(using=\"solr\")\n        self.bi.clear(using=\"whoosh\")\n\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 0)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 0)\n\n        self.fi.update(using=\"solr\")\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 0)\n\n        self.fi.update(using=\"whoosh\")\n        results = self.solr_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n\n    def test_searchqueryset_using(self):\n        # Using the default.\n        sqs = SearchQuerySet(\"solr\")\n        self.assertEqual(sqs.count(), 5)\n        self.assertEqual(sqs.models(Foo).count(), 2)\n        self.assertEqual(sqs.models(Bar).count(), 3)\n\n        self.assertEqual(sqs.using(\"solr\").count(), 5)\n        self.assertEqual(sqs.using(\"solr\").models(Foo).count(), 2)\n        self.assertEqual(sqs.using(\"solr\").models(Bar).count(), 3)\n\n        self.assertEqual(sqs.using(\"whoosh\").count(), 2)\n        self.assertEqual(sqs.using(\"whoosh\").models(Foo).count(), 2)\n        self.assertEqual(sqs.using(\"whoosh\").models(Bar).count(), 0)\n\n    def test_searchquery_using(self):\n        sq = connections[\"solr\"].get_query()\n\n        # Using the default.\n        self.assertEqual(sq.get_count(), 5)\n\n        # \"Swap\" to the default.\n        sq = sq.using(\"solr\")\n        self.assertEqual(sq.get_count(), 5)\n\n        # Swap the ``SearchQuery`` used.\n        sq = sq.using(\"whoosh\")\n        
self.assertEqual(sq.get_count(), 2)\n\n    def test_excluded_indexes(self):\n        wui = connections[\"filtered_whoosh\"].get_unified_index()\n        self.assertTrue(any(isinstance(i, FooIndex) for i in wui.collect_indexes()))\n        self.assertFalse(any(isinstance(i, BarIndex) for i in wui.collect_indexes()))\n\n        # Shouldn't error.\n        wui.get_index(Foo)\n\n        # Should error, since it's not present.\n        self.assertRaises(NotHandled, wui.get_index, Bar)\n\n    def test_filtered_index_update(self):\n        for i in (\"whoosh\", \"filtered_whoosh\"):\n            self.fi.clear(using=i)\n            self.fi.update(using=i)\n\n        results = self.whoosh_backend.search(\"foo\")\n        self.assertEqual(results[\"hits\"], 2)\n\n        results = self.filtered_whoosh_backend.search(\"foo\")\n        self.assertEqual(\n            results[\"hits\"], 1, \"Filtered backend should only contain one record\"\n        )\n\n\nclass TestSignalProcessor(BaseSignalProcessor):\n    def setup(self):\n        self.setup_ran = True\n        super().setup()\n\n    def teardown(self):\n        self.teardown_ran = True\n        super().teardown()\n\n\nclass SignalProcessorTestCase(WhooshTestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Blatantly wrong data, just for assertion purposes.\n        self.fake_connections = {}\n        self.fake_router = []\n\n        self.ui = connections[\"solr\"].get_unified_index()\n        self.fi = self.ui.get_index(Foo)\n        self.bi = self.ui.get_index(Bar)\n        self.solr_backend = connections[\"solr\"].get_backend()\n        self.whoosh_backend = connections[\"whoosh\"].get_backend()\n\n        self.foo_1 = Foo.objects.create(title=\"Haystack test\", body=\"foo 1\")\n        self.foo_2 = Foo.objects.create(title=\"Another Haystack test\", body=\"foo 2\")\n        self.bar_1 = Bar.objects.create(author=\"Haystack test\", content=\"bar 1\")\n        self.bar_2 = 
Bar.objects.create(author=\"Another Haystack test\", content=\"bar 2\")\n        self.bar_3 = Bar.objects.create(\n            author=\"Yet another Haystack test\", content=\"bar 3\"\n        )\n\n        self.fi.reindex(using=\"solr\")\n        self.fi.reindex(using=\"whoosh\")\n        self.bi.reindex(using=\"solr\")\n\n    def tearDown(self):\n        self.fi.clear(using=\"solr\")\n        self.bi.clear(using=\"solr\")\n        super().tearDown()\n\n    def test_init(self):\n        tsp = TestSignalProcessor(self.fake_connections, self.fake_router)\n        self.assertEqual(tsp.connections, self.fake_connections)\n        self.assertEqual(tsp.connection_router, self.fake_router)\n        # We fake some side-effects to make sure it ran.\n        self.assertTrue(tsp.setup_ran)\n\n        bsp = BaseSignalProcessor(self.fake_connections, self.fake_router)\n        self.assertFalse(getattr(bsp, \"setup_ran\", False))\n\n    def test_setup(self):\n        tsp = TestSignalProcessor(self.fake_connections, self.fake_router)\n        tsp.setup()\n        self.assertTrue(tsp.setup_ran)\n\n    def test_teardown(self):\n        tsp = TestSignalProcessor(self.fake_connections, self.fake_router)\n        tsp.teardown()\n        self.assertTrue(tsp.teardown_ran)\n\n    def test_handle_save(self):\n        # Because the code here is pretty leaky (abstraction-wise), we'll test\n        # the actual setup.\n        # First, ensure the signal is setup.\n        self.assertEqual(len(models.signals.post_save.receivers), 1)\n\n        # Second, check the existing search data.\n        sqs = SearchQuerySet(\"solr\")\n        self.assertEqual(sqs.using(\"solr\").count(), 5)\n        self.assertEqual(sqs.using(\"solr\").models(Foo).count(), 2)\n        self.assertEqual(sqs.using(\"solr\").models(Bar).count(), 3)\n        self.assertEqual(sqs.using(\"whoosh\").count(), 2)\n        self.assertEqual(sqs.using(\"whoosh\").models(Foo).count(), 2)\n\n        self.assertEqual(\n            
sqs.using(\"solr\").models(Foo).order_by(\"django_id\")[0].text, \"foo 1\"\n        )\n        self.assertEqual(\n            sqs.using(\"whoosh\").models(Foo).order_by(\"django_id\")[0].text, \"foo 1\"\n        )\n\n        # Third, save the model, which should fire the signal & index the\n        # new data.\n        self.foo_1.body = \"A different body\"\n        self.foo_1.save()\n\n        # Fourth, check the search data for the updated data, making sure counts\n        # haven't changed.\n        sqs = SearchQuerySet(\"solr\")\n        self.assertEqual(sqs.using(\"solr\").count(), 5)\n        self.assertEqual(sqs.using(\"solr\").models(Foo).count(), 2)\n        self.assertEqual(sqs.using(\"solr\").models(Bar).count(), 3)\n        self.assertEqual(sqs.using(\"whoosh\").count(), 2)\n        self.assertEqual(sqs.using(\"whoosh\").models(Foo).count(), 2)\n\n        self.assertEqual(\n            sqs.using(\"solr\").models(Foo).order_by(\"django_id\")[0].text,\n            \"A different body\",\n        )\n        self.assertEqual(\n            sqs.using(\"whoosh\").models(Foo).order_by(\"django_id\")[0].text, \"foo 1\"\n        )\n\n    def test_handle_delete(self):\n        # Because the code here is pretty leaky (abstraction-wise), we'll test\n        # the actual setup.\n        # First, ensure the signal is setup.\n        self.assertEqual(len(models.signals.post_delete.receivers), 1)\n\n        # Second, check the existing search data.\n        sqs = SearchQuerySet(\"solr\")\n        self.assertEqual(sqs.using(\"solr\").count(), 5)\n        self.assertEqual(sqs.using(\"solr\").models(Foo).count(), 2)\n        self.assertEqual(sqs.using(\"solr\").models(Bar).count(), 3)\n        self.assertEqual(sqs.using(\"whoosh\").count(), 2)\n        self.assertEqual(sqs.using(\"whoosh\").models(Foo).count(), 2)\n\n        self.assertEqual(\n            sqs.using(\"solr\").models(Foo).order_by(\"django_id\")[0].text, \"foo 1\"\n        )\n        self.assertEqual(\n       
     sqs.using(\"whoosh\").models(Foo).order_by(\"django_id\")[0].text, \"foo 1\"\n        )\n\n        # Third, delete the model, which should fire the signal & remove the\n        # record from the index.\n        self.foo_1.delete()\n\n        # Fourth, check the search data for the now-removed data, making sure counts\n        # have changed correctly.\n        sqs = SearchQuerySet(\"solr\")\n        self.assertEqual(sqs.using(\"solr\").count(), 4)\n        self.assertEqual(sqs.using(\"solr\").models(Foo).count(), 1)\n        self.assertEqual(sqs.using(\"solr\").models(Bar).count(), 3)\n        self.assertEqual(sqs.using(\"whoosh\").count(), 2)\n        self.assertEqual(sqs.using(\"whoosh\").models(Foo).count(), 2)\n\n        self.assertEqual(\n            sqs.using(\"solr\").models(Foo).order_by(\"django_id\")[0].text, \"foo 2\"\n        )\n        self.assertEqual(\n            sqs.using(\"whoosh\").models(Foo).order_by(\"django_id\")[0].text, \"foo 1\"\n        )\n"
  },
  {
    "path": "test_haystack/results_per_page_urls.py",
    "content": "from django.conf.urls import url\n\nfrom haystack.views import SearchView\n\n\nclass CustomPerPage(SearchView):\n    results_per_page = 1\n\n\nurlpatterns = [\n    url(r\"^search/$\", CustomPerPage(load_all=False), name=\"haystack_search\"),\n    url(\n        r\"^search2/$\",\n        CustomPerPage(load_all=False, results_per_page=2),\n        name=\"haystack_search\",\n    ),\n]\n"
  },
  {
    "path": "test_haystack/run_tests.py",
    "content": "#!/usr/bin/env python\nimport sys\nfrom os.path import abspath, dirname\n\nimport nose\n\n\ndef run_all(argv=None):\n    sys.exitfunc = lambda: sys.stderr.write(\"Shutting down....\\n\")\n\n    # always insert coverage when running tests through setup.py\n    if argv is None:\n        argv = [\n            \"nosetests\",\n            \"--with-coverage\",\n            \"--cover-package=haystack\",\n            \"--cover-erase\",\n            \"--verbose\",\n        ]\n\n    nose.run_exit(argv=argv, defaultTest=abspath(dirname(__file__)))\n\n\nif __name__ == \"__main__\":\n    run_all(sys.argv)\n"
  },
  {
    "path": "test_haystack/settings.py",
    "content": "import os\nfrom tempfile import mkdtemp\n\nSECRET_KEY = \"Please do not spew DeprecationWarnings\"\n\n# Haystack settings for running tests.\nDATABASES = {\n    \"default\": {\"ENGINE\": \"django.db.backends.sqlite3\", \"NAME\": \"haystack_tests.db\"}\n}\n\nINSTALLED_APPS = [\n    \"django.contrib.admin\",\n    \"django.contrib.auth\",\n    \"django.contrib.contenttypes\",\n    \"django.contrib.sessions\",\n    \"django.contrib.messages\",\n    \"haystack\",\n    \"test_haystack.discovery\",\n    \"test_haystack.core\",\n    \"test_haystack.spatial\",\n    \"test_haystack.multipleindex\",\n    # This app exists to confirm that nothing breaks when INSTALLED_APPS has an app without models.py\n    # which is common in some cases for things like admin extensions, reporting, etc.\n    \"test_haystack.test_app_without_models\",\n    # Confirm that everything works with app labels which have more than one level of hierarchy\n    # as reported in https://github.com/django-haystack/django-haystack/issues/1152\n    \"test_haystack.test_app_with_hierarchy.contrib.django.hierarchal_app_django\",\n    \"test_haystack.test_app_using_appconfig.apps.SimpleTestAppConfig\",\n]\n\nTEMPLATES = [\n    {\n        \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n        \"APP_DIRS\": True,\n        \"OPTIONS\": {\n            \"context_processors\": [\n                \"django.contrib.auth.context_processors.auth\",\n                \"django.contrib.messages.context_processors.messages\",\n            ]\n        },\n    }\n]\n\nMIDDLEWARE = [\n    \"django.middleware.common.CommonMiddleware\",\n    \"django.contrib.sessions.middleware.SessionMiddleware\",\n    \"django.middleware.csrf.CsrfViewMiddleware\",\n    \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n    \"django.contrib.messages.middleware.MessageMiddleware\",\n]\n\nROOT_URLCONF = \"test_haystack.core.urls\"\n\nHAYSTACK_ROUTERS = [\n    \"haystack.routers.DefaultRouter\",\n    
\"test_haystack.multipleindex.routers.MultipleIndexRouter\",\n]\n\nHAYSTACK_CONNECTIONS = {\n    \"default\": {\"ENGINE\": \"test_haystack.mocks.MockEngine\"},\n    \"whoosh\": {\n        \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n        \"PATH\": mkdtemp(prefix=\"test_whoosh_query\"),\n        \"INCLUDE_SPELLING\": True,\n    },\n    \"filtered_whoosh\": {\n        \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n        \"PATH\": mkdtemp(prefix=\"haystack-multipleindex-filtered-whoosh-tests-\"),\n        \"EXCLUDED_INDEXES\": [\"test_haystack.multipleindex.search_indexes.BarIndex\"],\n    },\n    \"elasticsearch\": {\n        \"ENGINE\": \"haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine\",\n        \"URL\": os.environ.get(\"TEST_ELASTICSEARCH_1_URL\", \"http://localhost:9200/\"),\n        \"INDEX_NAME\": \"test_default\",\n        \"INCLUDE_SPELLING\": True,\n    },\n    \"simple\": {\"ENGINE\": \"haystack.backends.simple_backend.SimpleEngine\"},\n    \"solr\": {\n        \"ENGINE\": \"haystack.backends.solr_backend.SolrEngine\",\n        \"URL\": os.environ.get(\n            \"TEST_SOLR_URL\", \"http://localhost:9001/solr/collection1\"\n        ),\n        \"ADMIN_URL\": os.environ.get(\n            \"TEST_SOLR_ADMIN_URL\", \"http://localhost:9001/solr/admin/cores\"\n        ),\n        \"INCLUDE_SPELLING\": True,\n    },\n}\n\nif \"elasticsearch\" in HAYSTACK_CONNECTIONS:\n    try:\n        import elasticsearch\n\n        if (2,) <= elasticsearch.__version__ <= (3,):\n            HAYSTACK_CONNECTIONS[\"elasticsearch\"].update(\n                {\n                    \"ENGINE\": \"haystack.backends.elasticsearch2_backend.Elasticsearch2SearchEngine\"\n                }\n            )\n        elif (5,) <= elasticsearch.__version__ <= (6,):\n            HAYSTACK_CONNECTIONS[\"elasticsearch\"].update(\n                {\n                    \"ENGINE\": 
\"haystack.backends.elasticsearch5_backend.Elasticsearch5SearchEngine\"\n                }\n            )\n    except ImportError:\n        del HAYSTACK_CONNECTIONS[\"elasticsearch\"]\n"
  },
  {
    "path": "test_haystack/simple_tests/__init__.py",
    "content": "import warnings\n\nwarnings.simplefilter(\"ignore\", Warning)\n"
  },
  {
    "path": "test_haystack/simple_tests/search_indexes.py",
    "content": "from haystack import indexes\n\nfrom ..core.models import MockModel, ScoreMockModel\n\n\nclass SimpleMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass SimpleMockScoreIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    score = indexes.CharField(model_attr=\"score\")\n\n    def get_model(self):\n        return ScoreMockModel\n\n\nclass SimpleMockUUIDModelIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, model_attr=\"characteristics\")\n"
  },
  {
    "path": "test_haystack/simple_tests/test_simple_backend.py",
    "content": "from datetime import date\n\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom haystack import connections\nfrom haystack.query import SearchQuerySet\nfrom haystack.utils.loading import UnifiedIndex\n\nfrom ..core.models import MockModel, OneToManyRightSideModel, ScoreMockModel\nfrom ..mocks import MockSearchResult\nfrom .search_indexes import SimpleMockScoreIndex, SimpleMockSearchIndex\n\n\nclass SimpleSearchBackendTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        self.backend = connections[\"simple\"].get_backend()\n        ui = connections[\"simple\"].get_unified_index()\n        self.index = SimpleMockSearchIndex()\n        ui.build(indexes=[self.index, SimpleMockScoreIndex()])\n        self.sample_objs = MockModel.objects.all()\n\n    def test_update(self):\n        self.backend.update(self.index, self.sample_objs)\n\n    def test_remove(self):\n        self.backend.remove(self.sample_objs[0])\n\n    def test_clear(self):\n        self.backend.clear()\n\n    def test_search(self):\n        # No query string should always yield zero results.\n        self.assertEqual(self.backend.search(\"\"), {\"hits\": 0, \"results\": []})\n\n        self.assertEqual(self.backend.search(\"*\")[\"hits\"], 24)\n        self.assertEqual(\n            sorted([result.pk for result in self.backend.search(\"*\")[\"results\"]]),\n            [\n                1,\n                1,\n                2,\n                3,\n                4,\n                5,\n                6,\n                7,\n                8,\n                9,\n                10,\n                11,\n                12,\n                13,\n                14,\n                15,\n                16,\n                17,\n                18,\n                19,\n                20,\n                21,\n                22,\n                23,\n            
],\n        )\n\n        self.assertEqual(self.backend.search(\"daniel\")[\"hits\"], 23)\n        self.assertEqual(\n            [result.pk for result in self.backend.search(\"daniel\")[\"results\"]],\n            [\n                1,\n                2,\n                3,\n                4,\n                5,\n                6,\n                7,\n                8,\n                9,\n                10,\n                11,\n                12,\n                13,\n                14,\n                15,\n                16,\n                17,\n                18,\n                19,\n                20,\n                21,\n                22,\n                23,\n            ],\n        )\n\n        self.assertEqual(self.backend.search(\"should be a string\")[\"hits\"], 1)\n        self.assertEqual(\n            [\n                result.pk\n                for result in self.backend.search(\"should be a string\")[\"results\"]\n            ],\n            [8],\n        )\n        # Ensure the results are ``SearchResult`` instances...\n        self.assertEqual(\n            self.backend.search(\"should be a string\")[\"results\"][0].score, 0\n        )\n\n        self.assertEqual(self.backend.search(\"index document\")[\"hits\"], 6)\n        self.assertEqual(\n            [result.pk for result in self.backend.search(\"index document\")[\"results\"]],\n            [2, 3, 15, 16, 17, 18],\n        )\n\n        # Regression-ville\n        self.assertEqual(\n            [\n                result.object.id\n                for result in self.backend.search(\"index document\")[\"results\"]\n            ],\n            [2, 3, 15, 16, 17, 18],\n        )\n        self.assertEqual(\n            self.backend.search(\"index document\")[\"results\"][0].model, MockModel\n        )\n\n        # No support for spelling suggestions\n        self.assertEqual(self.backend.search(\"Indx\")[\"hits\"], 0)\n        
self.assertFalse(self.backend.search(\"Indx\").get(\"spelling_suggestion\"))\n\n        # No support for facets\n        self.assertEqual(\n            self.backend.search(\"\", facets=[\"name\"]), {\"hits\": 0, \"results\": []}\n        )\n        self.assertEqual(self.backend.search(\"daniel\", facets=[\"name\"])[\"hits\"], 23)\n        self.assertEqual(\n            self.backend.search(\n                \"\",\n                date_facets={\n                    \"pub_date\": {\n                        \"start_date\": date(2008, 2, 26),\n                        \"end_date\": date(2008, 2, 26),\n                        \"gap\": \"/MONTH\",\n                    }\n                },\n            ),\n            {\"hits\": 0, \"results\": []},\n        )\n        self.assertEqual(\n            self.backend.search(\n                \"daniel\",\n                date_facets={\n                    \"pub_date\": {\n                        \"start_date\": date(2008, 2, 26),\n                        \"end_date\": date(2008, 2, 26),\n                        \"gap\": \"/MONTH\",\n                    }\n                },\n            )[\"hits\"],\n            23,\n        )\n        self.assertEqual(\n            self.backend.search(\"\", query_facets={\"name\": \"[* TO e]\"}),\n            {\"hits\": 0, \"results\": []},\n        )\n        self.assertEqual(\n            self.backend.search(\"daniel\", query_facets={\"name\": \"[* TO e]\"})[\"hits\"], 23\n        )\n        self.assertFalse(self.backend.search(\"\").get(\"facets\"))\n        self.assertFalse(self.backend.search(\"daniel\").get(\"facets\"))\n\n        # Note that only textual-fields are supported.\n        self.assertEqual(self.backend.search(\"2009-06-18\")[\"hits\"], 0)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                self.backend.search(\"index document\", result_class=MockSearchResult)[\n                    \"results\"\n      
          ][0],\n                MockSearchResult,\n            )\n        )\n\n        # Ensure empty queries does not raise.\n        self.assertEqual(\n            self.backend.search(\"foo\", models=[OneToManyRightSideModel]),\n            {\"hits\": 0, \"results\": []},\n        )\n\n    def test_filter_models(self):\n        self.backend.update(self.index, self.sample_objs)\n        self.assertEqual(self.backend.search(\"*\", models=set([]))[\"hits\"], 24)\n        self.assertEqual(self.backend.search(\"*\", models=set([MockModel]))[\"hits\"], 23)\n\n    def test_more_like_this(self):\n        self.backend.update(self.index, self.sample_objs)\n        self.assertEqual(self.backend.search(\"*\")[\"hits\"], 24)\n\n        # Unsupported by 'simple'. Should see empty results.\n        self.assertEqual(self.backend.more_like_this(self.sample_objs[0])[\"hits\"], 0)\n\n    def test_score_field_collision(self):\n\n        index = connections[\"simple\"].get_unified_index().get_index(ScoreMockModel)\n        sample_objs = ScoreMockModel.objects.all()\n\n        self.backend.update(index, self.sample_objs)\n\n        # 42 is the in the match, which will be removed from the result\n        self.assertEqual(self.backend.search(\"42\")[\"results\"][0].score, 0)\n\n\n@override_settings(DEBUG=True)\nclass LiveSimpleSearchQuerySetTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"simple\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SimpleMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"simple\"]._index = self.ui\n\n        self.sample_objs = MockModel.objects.all()\n        self.sqs = SearchQuerySet(using=\"simple\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"simple\"]._index = self.old_ui\n        super().tearDown()\n\n    def 
test_general_queries(self):\n        # For now, just make sure these don't throw an exception.\n        # They won't work until the simple backend is improved.\n        self.assertTrue(len(self.sqs.auto_query(\"daniel\")) > 0)\n        self.assertTrue(len(self.sqs.filter(text=\"index\")) > 0)\n        self.assertTrue(len(self.sqs.exclude(name=\"daniel\")) > 0)\n        self.assertTrue(len(self.sqs.order_by(\"-pub_date\")) > 0)\n\n    def test_general_queries_unicode(self):\n        self.assertEqual(len(self.sqs.auto_query(\"Привет\")), 0)\n\n    def test_more_like_this(self):\n        # MLT shouldn't be horribly broken. This used to throw an exception.\n        mm1 = MockModel.objects.get(pk=1)\n        self.assertEqual(len(self.sqs.filter(text=1).more_like_this(mm1)), 0)\n\n    def test_values_queries(self):\n        sqs = self.sqs.auto_query(\"daniel\")\n        self.assertTrue(len(sqs) > 0)\n\n        flat_scores = sqs.values_list(\"score\", flat=True)\n        self.assertEqual(flat_scores[0], 0)\n\n        scores = sqs.values_list(\"id\", \"score\")\n        self.assertEqual(scores[0], [1, 0])\n\n        scores_dict = sqs.values(\"id\", \"score\")\n        self.assertEqual(scores_dict[0], {\"id\": 1, \"score\": 0})\n"
  },
  {
    "path": "test_haystack/simple_tests/test_simple_query.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ\n\n\nclass SimpleSearchQueryTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.sq = connections[\"simple\"].get_query()\n\n    def test_build_query_all(self):\n        self.assertEqual(self.sq.build_query(), \"*\")\n\n    def test_build_query_single_word(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.assertEqual(self.sq.build_query(), \"hello\")\n\n    def test_build_query_multiple_word(self):\n        self.sq.add_filter(SQ(name=\"foo\"))\n        self.sq.add_filter(SQ(name=\"bar\"))\n        self.assertEqual(self.sq.build_query(), \"foo bar\")\n\n    def test_set_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n        # Custom class.\n        class IttyBittyResult(object):\n            pass\n\n        self.sq.set_result_class(IttyBittyResult)\n        self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))\n\n        # Reset to default.\n        self.sq.set_result_class(None)\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n"
  },
  {
    "path": "test_haystack/solr_tests/__init__.py",
    "content": "import warnings\n\nwarnings.simplefilter(\"ignore\", Warning)\n\nfrom ..utils import check_solr\n\n\ndef setup():\n    check_solr()\n"
  },
  {
    "path": "test_haystack/solr_tests/server/.gitignore",
    "content": "solr-*.tgz\n"
  },
  {
    "path": "test_haystack/solr_tests/server/confdir/schema.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<!--\n This is the Solr schema file. This file should be named \"schema.xml\" and\n should be in the conf directory under the solr home\n (i.e. ./solr/conf/schema.xml by default)\n or located where the classloader for the Solr webapp can find it.\n\n This example schema is the recommended starting point for users.\n It should be kept correct and concise, usable out-of-the-box.\n\n For more information, on how to customize this file, please see\n http://wiki.apache.org/solr/SchemaXml\n\n PERFORMANCE NOTE: this schema includes many optional features and should not\n be used for benchmarking.  
To improve performance one could\n  - set stored=\"false\" for all fields possible (esp large fields) when you\n    only need to search on the field but don't need to return the original\n    value.\n  - set indexed=\"false\" if you don't need to search on the field, but only\n    return the field as a result of searching on other indexed fields.\n  - remove all unneeded copyField statements\n  - for best index size and searching performance, set \"index\" to false\n    for all general text fields, use copyField to copy them to the\n    catchall \"text\" field, and use that for searching.\n  - For maximum indexing performance, use the ConcurrentUpdateSolrServer\n    java client.\n  - Remember to run the JVM in server mode, and use a higher logging level\n    that avoids logging every request\n-->\n\n<schema name=\"haystack-schema\" version=\"1.6\">\n\n    <!--\n    ######################## django-haystack specifics begin ########################\n    -->\n\n    <fieldType name=\"edge_ngram\" class=\"solr.TextField\" positionIncrementGap=\"1\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\" />\n            <filter class=\"solr.LowerCaseFilterFactory\" />\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"1\" generateNumberParts=\"1\" catenateWords=\"0\" catenateNumbers=\"0\" catenateAll=\"0\" splitOnCaseChange=\"1\"/>\n            <filter class=\"solr.EdgeNGramFilterFactory\" minGramSize=\"2\" maxGramSize=\"15\" />\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\" />\n            <filter class=\"solr.LowerCaseFilterFactory\" />\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"1\" generateNumberParts=\"1\" catenateWords=\"0\" catenateNumbers=\"0\" catenateAll=\"0\" splitOnCaseChange=\"1\"/>\n        </analyzer>\n    </fieldType>\n\n    <fieldType name=\"ngram\" 
class=\"solr.TextField\" >\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.KeywordTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.NGramFilterFactory\" minGramSize=\"3\" maxGramSize=\"15\" />\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.KeywordTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <field name=\"id\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\" required=\"true\"/>\n    <field name=\"django_ct\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"django_id\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n\n    <uniqueKey>id</uniqueKey>\n\n    <field name=\"author\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\" omitNorms=\"false\"/>\n    <field name=\"average_rating\" type=\"floats\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"cat\" type=\"text_ws\" indexed=\"true\" stored=\"true\" multiValued=\"true\" omitNorms=\"true\"/>\n    <field name=\"comment\" type=\"text_en\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"created\" type=\"date\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"editor\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\" omitNorms=\"false\"/>\n    <field name=\"is_active\" type=\"boolean\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"location\" type=\"location\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"month\" type=\"string\" indexed=\"false\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"name\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"name_auto\" type=\"edge_ngram\" 
indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"name_exact\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"post_count\" type=\"ints\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"price\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"pub_date\" type=\"date\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"sites\" type=\"ints\" indexed=\"true\" stored=\"true\" multiValued=\"true\"/>\n    <field name=\"tags\" type=\"text_en\" indexed=\"true\" stored=\"true\" multiValued=\"true\"/>\n    <field name=\"text\" type=\"text_en\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"text_auto\" type=\"edge_ngram\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"type\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <field name=\"username\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>\n    <!--\n    ######################## django-haystack specifics end ########################\n    -->\n\n    <!-- attribute \"name\" is the name of this schema and is only used for display purposes.\n       version=\"x.y\" is Solr's version number for the schema syntax and\n       semantics.  It should not normally be changed by applications.\n\n       1.0: multiValued attribute did not exist, all fields are multiValued\n            by nature\n       1.1: multiValued attribute introduced, false by default\n       1.2: omitTermFreqAndPositions attribute introduced, true by default\n            except for text fields.\n       1.3: removed optional field compress feature\n       1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser\n            behavior when a single string produces multiple tokens.  
Defaults\n            to off for version >= 1.4\n       1.5: omitNorms defaults to true for primitive field types\n            (int, float, boolean, string...)\n       1.6: useDocValuesAsStored defaults to true.\n    -->\n\n    <!-- Valid attributes for fields:\n     name: mandatory - the name for the field\n     type: mandatory - the name of a field type from the\n       fieldTypes section\n     indexed: true if this field should be indexed (searchable or sortable)\n     stored: true if this field should be retrievable\n     docValues: true if this field should have doc values. Doc values are\n       useful for faceting, grouping, sorting and function queries. Although not\n       required, doc values will make the index faster to load, more\n       NRT-friendly and more memory-efficient. They however come with some\n       limitations: they are currently only supported by StrField, UUIDField\n       and all Trie*Fields, and depending on the field type, they might\n       require the field to be single-valued, be required or have a default\n       value (check the documentation of the field type you're interested in\n       for more information)\n     multiValued: true if this field may contain multiple values per document\n     omitNorms: (expert) set to true to omit the norms associated with\n       this field (this disables length normalization and index-time\n       boosting for the field, and saves some memory).  Only full-text\n       fields or fields that need an index-time boost need norms.\n       Norms are omitted for primitive (non-analyzed) types by default.\n     termVectors: [false] set to true to store the term vector for a\n       given field.\n       When using MoreLikeThis, fields used for similarity should be\n       stored for best performance.\n     termPositions: Store position information with the term vector.\n       This will increase storage costs.\n     termOffsets: Store offset information with the term vector. 
This\n       will increase storage costs.\n     required: The field is required.  It will throw an error if the\n       value does not exist\n     default: a value that should be used if no value is specified\n       when adding a document.\n    -->\n\n    <!-- field names should consist of alphanumeric or underscore characters only and\n      not start with a digit.  This is not currently strictly enforced,\n      but other field names will not have first class support from all components\n      and back compatibility is not guaranteed.  Names with both leading and\n      trailing underscores (e.g. _version_) are reserved.\n    -->\n\n    <!-- In this data_driven_schema_configs configset, only three fields are pre-declared:\n         id, _version_, and _text_.  All other fields will be type guessed and added via the\n         \"add-unknown-fields-to-the-schema\" update request processor chain declared\n         in solrconfig.xml.\n\n         Note that many dynamic fields are also defined - you can use them to specify a\n         field's type via field naming conventions - see below.\n\n         WARNING: The _text_ catch-all field will significantly increase your index size.\n         If you don't need it, consider removing it and the corresponding copyField directive.\n    -->\n\n    <field name=\"_version_\" type=\"long\" indexed=\"true\" stored=\"false\"/>\n    <field name=\"_root_\" type=\"string\" indexed=\"true\" stored=\"false\" docValues=\"false\" />\n    <!--<field name=\"_text_\" type=\"edge_ngram\" indexed=\"true\" stored=\"false\" multiValued=\"true\"/>\n    <copyField source=\"*\" dest=\"_text_\"/>-->\n\n\n    <!-- Dynamic field definitions allow using convention over configuration\n       for fields via the specification of patterns to match field names.\n       EXAMPLE:  name=\"*_i\" will match any field ending in _i (like myid_i, z_i)\n       RESTRICTION: the glob-like pattern in the name attribute must have\n       a \"*\" only at the start or the 
end.  -->\n\n    <dynamicField name=\"*_i\"  type=\"int\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_is\" type=\"ints\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_s\"  type=\"string\"  indexed=\"true\"  stored=\"true\" />\n    <dynamicField name=\"*_ss\" type=\"strings\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_l\"  type=\"long\"   indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_ls\" type=\"longs\"   indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_t\"   type=\"text_general\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_txt\" type=\"text_general\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_b\"  type=\"boolean\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_bs\" type=\"booleans\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_f\"  type=\"float\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_fs\" type=\"floats\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_d\"  type=\"double\" indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_ds\" type=\"doubles\" indexed=\"true\"  stored=\"true\"/>\n\n    <!-- Type used to index the lat and lon components for the \"location\" FieldType -->\n    <dynamicField name=\"*_coordinate\"  type=\"tdouble\" indexed=\"true\"  stored=\"false\" useDocValuesAsStored=\"false\" />\n\n    <dynamicField name=\"*_dt\"  type=\"date\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_dts\" type=\"date\"    indexed=\"true\"  stored=\"true\" multiValued=\"true\"/>\n    <dynamicField name=\"*_p\"  type=\"location\" indexed=\"true\" stored=\"true\"/>\n    <dynamicField name=\"*_srpt\"  type=\"location_rpt\" indexed=\"true\" stored=\"true\"/>\n\n    <!-- some trie-coded dynamic fields for faster range queries -->\n    <dynamicField name=\"*_ti\" type=\"tint\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField 
name=\"*_tis\" type=\"tints\"    indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tl\" type=\"tlong\"   indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tls\" type=\"tlongs\"   indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tf\" type=\"tfloat\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tfs\" type=\"tfloats\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_td\" type=\"tdouble\" indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tds\" type=\"tdoubles\" indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tdt\" type=\"tdate\"  indexed=\"true\"  stored=\"true\"/>\n    <dynamicField name=\"*_tdts\" type=\"tdates\"  indexed=\"true\"  stored=\"true\"/>\n\n    <dynamicField name=\"*_c\"   type=\"currency\" indexed=\"true\"  stored=\"true\"/>\n\n    <dynamicField name=\"ignored_*\" type=\"ignored\" multiValued=\"true\"/>\n    <dynamicField name=\"attr_*\" type=\"text_general\" indexed=\"true\" stored=\"true\" multiValued=\"true\"/>\n\n    <dynamicField name=\"random_*\" type=\"random\" />\n\n    <!-- uncomment the following to ignore any fields that don't already match an existing\n        field name or dynamic field, rather than reporting them as an error.\n        alternately, change the type=\"ignored\" to some other type e.g. \"text\" if you want\n        unknown fields indexed and/or stored by default\n\n        NB: use of \"*\" dynamic fields will disable field type guessing and adding\n        unknown fields to the schema. -->\n    <!--dynamicField name=\"*\" type=\"ignored\" multiValued=\"true\" /-->\n\n    <!-- Field to use to determine and enforce document uniqueness.\n      Unless this field is marked with required=\"false\", it will be a required field\n    -->\n    <uniqueKey>id</uniqueKey>\n\n    <!-- copyField commands copy one field to another at the time a document\n       is added to the index.  
It's used either to index the same field differently,\n       or to add multiple fields to the same field for easier/faster searching.\n\n    <copyField source=\"sourceFieldName\" dest=\"destinationFieldName\"/>\n    -->\n\n    <!-- field type definitions. The \"name\" attribute is\n       just a label to be used by field definitions.  The \"class\"\n       attribute and any other attributes determine the real\n       behavior of the fieldType.\n         Class names starting with \"solr\" refer to java classes in a\n       standard package such as org.apache.solr.analysis\n    -->\n\n    <!-- The StrField type is not analyzed, but indexed/stored verbatim.\n       It supports doc values but in that case the field needs to be\n       single-valued and either required or have a default value.\n      -->\n    <fieldType name=\"string\" class=\"solr.StrField\" sortMissingLast=\"true\" docValues=\"true\" />\n    <fieldType name=\"strings\" class=\"solr.StrField\" sortMissingLast=\"true\" multiValued=\"true\" docValues=\"true\" />\n\n    <!-- boolean type: \"true\" or \"false\" -->\n    <fieldType name=\"boolean\" class=\"solr.BoolField\" sortMissingLast=\"true\"/>\n\n    <fieldType name=\"booleans\" class=\"solr.BoolField\" sortMissingLast=\"true\" multiValued=\"true\"/>\n\n    <!-- sortMissingLast and sortMissingFirst attributes are optional attributes are\n         currently supported on types that are sorted internally as strings\n         and on numeric types.\n\t     This includes \"string\",\"boolean\", and, as of 3.5 (and 4.x),\n\t     int, float, long, date, double, including the \"Trie\" variants.\n       - If sortMissingLast=\"true\", then a sort on this field will cause documents\n         without the field to come after documents with the field,\n         regardless of the requested sort order (asc or desc).\n       - If sortMissingFirst=\"true\", then a sort on this field will cause documents\n         without the field to come before documents with the 
field,\n         regardless of the requested sort order.\n       - If sortMissingLast=\"false\" and sortMissingFirst=\"false\" (the default),\n         then default lucene sorting will be used which places docs without the\n         field first in an ascending sort and last in a descending sort.\n    -->\n\n    <!--\n      Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.\n\n      These fields support doc values, but they require the field to be\n      single-valued and either be required or have a default value.\n    -->\n    <fieldType name=\"int\" class=\"solr.TrieIntField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"float\" class=\"solr.TrieFloatField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"long\" class=\"solr.TrieLongField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"double\" class=\"solr.TrieDoubleField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n\n    <fieldType name=\"ints\" class=\"solr.TrieIntField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"floats\" class=\"solr.TrieFloatField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"longs\" class=\"solr.TrieLongField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"doubles\" class=\"solr.TrieDoubleField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n\n    <!--\n     Numeric field types that index each value at various levels of precision\n     to accelerate range queries when the number of values between the range\n     endpoints is large. 
See the javadoc for NumericRangeQuery for internal\n     implementation details.\n\n     Smaller precisionStep values (specified in bits) will lead to more tokens\n     indexed per value, slightly larger index size, and faster range queries.\n     A precisionStep of 0 disables indexing at different precision levels.\n    -->\n    <fieldType name=\"tint\" class=\"solr.TrieIntField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"tfloat\" class=\"solr.TrieFloatField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"tlong\" class=\"solr.TrieLongField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"tdouble\" class=\"solr.TrieDoubleField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\"/>\n\n    <fieldType name=\"tints\" class=\"solr.TrieIntField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"tfloats\" class=\"solr.TrieFloatField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"tlongs\" class=\"solr.TrieLongField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n    <fieldType name=\"tdoubles\" class=\"solr.TrieDoubleField\" docValues=\"true\" precisionStep=\"8\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n\n    <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and\n         is a more restricted form of the canonical representation of dateTime\n         http://www.w3.org/TR/xmlschema-2/#dateTime\n         The trailing \"Z\" designates UTC time and is mandatory.\n         Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z\n         All other components are mandatory.\n\n         Expressions can also be used to denote calculations that should be\n         performed relative to \"NOW\" to determine the value, 
ie...\n\n               NOW/HOUR\n                  ... Round to the start of the current hour\n               NOW-1DAY\n                  ... Exactly 1 day prior to now\n               NOW/DAY+6MONTHS+3DAYS\n                  ... 6 months and 3 days in the future from the start of\n                      the current day\n\n         Consult the TrieDateField javadocs for more information.\n\n         Note: For faster range queries, consider the tdate type\n      -->\n    <fieldType name=\"date\" class=\"solr.TrieDateField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\"/>\n    <fieldType name=\"dates\" class=\"solr.TrieDateField\" docValues=\"true\" precisionStep=\"0\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n\n    <!-- A Trie based date field for faster date range queries and date faceting. -->\n    <fieldType name=\"tdate\" class=\"solr.TrieDateField\" docValues=\"true\" precisionStep=\"6\" positionIncrementGap=\"0\"/>\n\n    <fieldType name=\"tdates\" class=\"solr.TrieDateField\" docValues=\"true\" precisionStep=\"6\" positionIncrementGap=\"0\" multiValued=\"true\"/>\n\n\n    <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings -->\n    <fieldType name=\"binary\" class=\"solr.BinaryField\"/>\n\n    <!-- The \"RandomSortField\" is not used to store or search any\n         data.  You can declare fields of this type it in your schema\n         to generate pseudo-random orderings of your docs for sorting\n         or function purposes.  The ordering is generated based on the field\n         name and the version of the index. 
As long as the index version\n         remains unchanged, and the same field name is reused,\n         the ordering of the docs will be consistent.\n         If you want different psuedo-random orderings of documents,\n         for the same version of the index, use a dynamicField and\n         change the field name in the request.\n     -->\n    <fieldType name=\"random\" class=\"solr.RandomSortField\" indexed=\"true\" />\n\n    <!-- solr.TextField allows the specification of custom text analyzers\n         specified as a tokenizer and a list of token filters. Different\n         analyzers may be specified for indexing and querying.\n\n         The optional positionIncrementGap puts space between multiple fields of\n         this type on the same document, with the purpose of preventing false phrase\n         matching across fields.\n\n         For more info on customizing your analyzer chain, please see\n         http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters\n     -->\n\n    <!-- One can also specify an existing Analyzer class that has a\n         default constructor via the class attribute on the analyzer element.\n         Example:\n    <fieldType name=\"text_greek\" class=\"solr.TextField\">\n      <analyzer class=\"org.apache.lucene.analysis.el.GreekAnalyzer\"/>\n    </fieldType>\n    -->\n\n    <!-- A text field that only splits on whitespace for exact matching of words -->\n    <dynamicField name=\"*_ws\" type=\"text_ws\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ws\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- A general text field that has reasonable, generic\n         cross-language defaults: it tokenizes with StandardTokenizer,\n\t       removes stop words from case-insensitive \"stopwords.txt\"\n\t       (empty by default), and down cases.  
At query time only, it\n\t       also applies synonyms.\n\t  -->\n    <fieldType name=\"text_general\" class=\"solr.TextField\" positionIncrementGap=\"100\" multiValued=\"true\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"stopwords.txt\" />\n            <!-- in this example, we will only use synonyms at query time\n            <filter class=\"solr.SynonymFilterFactory\" synonyms=\"index_synonyms.txt\" ignoreCase=\"true\" expand=\"false\"/>\n            -->\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"stopwords.txt\" />\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"true\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- A text field with defaults appropriate for English: it\n         tokenizes with StandardTokenizer, removes English stop words\n         (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and\n         finally applies Porter's stemming.  The query time analyzer\n         also applies synonyms from synonyms.txt. 
-->\n    <dynamicField name=\"*_txt_en\" type=\"text_en\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_en\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- in this example, we will only use synonyms at query time\n            <filter class=\"solr.SynonymFilterFactory\" synonyms=\"index_synonyms.txt\" ignoreCase=\"true\" expand=\"false\"/>\n            -->\n            <!-- Case insensitive stop word removal.\n            -->\n            <filter class=\"solr.StopFilterFactory\"\n                    ignoreCase=\"true\"\n                    words=\"lang/stopwords_en.txt\"\n            />\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.EnglishPossessiveFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:\n            <filter class=\"solr.EnglishMinimalStemFilterFactory\"/>\n              -->\n            <filter class=\"solr.PorterStemFilterFactory\"/>\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"true\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\"\n                    ignoreCase=\"true\"\n                    words=\"lang/stopwords_en.txt\"\n            />\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.EnglishPossessiveFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <!-- Optionally you may 
want to use this less aggressive stemmer instead of PorterStemFilterFactory:\n            <filter class=\"solr.EnglishMinimalStemFilterFactory\"/>\n              -->\n            <filter class=\"solr.PorterStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- A text field with defaults appropriate for English, plus\n         aggressive word-splitting and autophrase features enabled.\n         This field is just like text_en, except it adds\n         WordDelimiterFilter to enable splitting and matching of\n         words on case-change, alpha numeric boundaries, and\n         non-alphanumeric chars.  This means certain compound word\n         cases will work, for example query \"wi fi\" will match\n         document \"WiFi\" or \"wi-fi\".\n    -->\n    <dynamicField name=\"*_txt_en_split\" type=\"text_en_splitting\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_en_splitting\" class=\"solr.TextField\" positionIncrementGap=\"100\" autoGeneratePhraseQueries=\"true\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\"/>\n            <!-- in this example, we will only use synonyms at query time\n            <filter class=\"solr.SynonymFilterFactory\" synonyms=\"index_synonyms.txt\" ignoreCase=\"true\" expand=\"false\"/>\n            -->\n            <!-- Case insensitive stop word removal.\n            -->\n            <filter class=\"solr.StopFilterFactory\"\n                    ignoreCase=\"true\"\n                    words=\"lang/stopwords_en.txt\"\n            />\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"1\" generateNumberParts=\"1\" catenateWords=\"1\" catenateNumbers=\"1\" catenateAll=\"0\" splitOnCaseChange=\"1\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <filter class=\"solr.PorterStemFilterFactory\"/>\n     
   </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"true\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\"\n                    ignoreCase=\"true\"\n                    words=\"lang/stopwords_en.txt\"\n            />\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"1\" generateNumberParts=\"1\" catenateWords=\"0\" catenateNumbers=\"0\" catenateAll=\"0\" splitOnCaseChange=\"1\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <filter class=\"solr.PorterStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,\n         but may be good for SKUs.  Can insert dashes in the wrong place and still match. 
-->\n    <dynamicField name=\"*_txt_en_split_tight\" type=\"text_en_splitting_tight\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_en_splitting_tight\" class=\"solr.TextField\" positionIncrementGap=\"100\" autoGeneratePhraseQueries=\"true\">\n        <analyzer>\n            <tokenizer class=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"false\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_en.txt\"/>\n            <filter class=\"solr.WordDelimiterGraphFilterFactory\" generateWordParts=\"0\" generateNumberParts=\"0\" catenateWords=\"1\" catenateNumbers=\"1\" catenateAll=\"0\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.KeywordMarkerFilterFactory\" protected=\"protwords.txt\"/>\n            <filter class=\"solr.EnglishMinimalStemFilterFactory\"/>\n            <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes\n                 possible with WordDelimiterFilter in conjuncton with stemming. 
-->\n            <filter class=\"solr.RemoveDuplicatesTokenFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Just like text_general except it reverses the characters of\n\t       each token, to enable more efficient leading wildcard queries.\n    -->\n    <dynamicField name=\"*_txt_rev\" type=\"text_general_rev\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_general_rev\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"stopwords.txt\" />\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.ReversedWildcardFilterFactory\" withOriginal=\"true\"\n                    maxPosAsterisk=\"3\" maxPosQuestion=\"2\" maxFractionAsterisk=\"0.33\"/>\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.SynonymGraphFilterFactory\" synonyms=\"synonyms.txt\"\n             format=\"solr\" ignoreCase=\"false\" expand=\"true\"\n             tokenizerFactory=\"solr.WhitespaceTokenizerFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"stopwords.txt\" />\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <dynamicField name=\"*_phon_en\" type=\"phonetic_en\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"phonetic_en\" stored=\"false\" indexed=\"true\" class=\"solr.TextField\" >\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.DoubleMetaphoneFilterFactory\" inject=\"false\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- lowercases the entire field value, keeping it as a single token.  
-->\n    <dynamicField name=\"*_s_lower\" type=\"lowercase\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"lowercase\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.KeywordTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\" />\n        </analyzer>\n    </fieldType>\n\n    <!--\n      Example of using PathHierarchyTokenizerFactory at index time, so\n      queries for paths match documents at that path, or in descendent paths\n    -->\n    <dynamicField name=\"*_descendent_path\" type=\"descendent_path\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"descendent_path\" class=\"solr.TextField\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.PathHierarchyTokenizerFactory\" delimiter=\"/\" />\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.KeywordTokenizerFactory\" />\n        </analyzer>\n    </fieldType>\n\n    <!--\n      Example of using PathHierarchyTokenizerFactory at query time, so\n      queries for paths match documents at that path, or in ancestor paths\n    -->\n    <dynamicField name=\"*_ancestor_path\" type=\"ancestor_path\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"ancestor_path\" class=\"solr.TextField\">\n        <analyzer type=\"index\">\n            <tokenizer class=\"solr.KeywordTokenizerFactory\" />\n        </analyzer>\n        <analyzer type=\"query\">\n            <tokenizer class=\"solr.PathHierarchyTokenizerFactory\" delimiter=\"/\" />\n        </analyzer>\n    </fieldType>\n\n    <!-- since fields of this type are by default not stored or indexed,\n         any data added to them will be ignored outright.  
-->\n    <fieldType name=\"ignored\" stored=\"false\" indexed=\"false\" docValues=\"false\" multiValued=\"true\" class=\"solr.StrField\" />\n\n    <!-- This point type indexes the coordinates as separate fields (subFields)\n      If subFieldType is defined, it references a type, and a dynamic field\n      definition is created matching *___<typename>.  Alternately, if\n      subFieldSuffix is defined, that is used to create the subFields.\n      Example: if subFieldType=\"double\", then the coordinates would be\n        indexed in fields myloc_0___double,myloc_1___double.\n      Example: if subFieldSuffix=\"_d\" then the coordinates would be indexed\n        in fields myloc_0_d,myloc_1_d\n      The subFields are an implementation detail of the fieldType, and end\n      users normally should not need to know about them.\n     -->\n    <dynamicField name=\"*_point\" type=\"point\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"point\" class=\"solr.PointType\" dimension=\"2\" subFieldSuffix=\"_d\"/>\n\n    <!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->\n    <fieldType name=\"location\" class=\"solr.LatLonType\" subFieldSuffix=\"_coordinate\"/>\n\n    <!-- An alternative geospatial field type new to Solr 4.  It supports multiValued and polygon shapes.\n      For more information about this and other Spatial fields new to Solr 4, see:\n      http://wiki.apache.org/solr/SolrAdaptersForLuceneSpatial4\n    -->\n    <fieldType name=\"location_rpt\" class=\"solr.SpatialRecursivePrefixTreeFieldType\"\n               geo=\"true\" distErrPct=\"0.025\" maxDistErr=\"0.001\" distanceUnits=\"kilometers\" />\n\n    <!-- Money/currency field type. See http://wiki.apache.org/solr/MoneyFieldType\n        Parameters:\n          defaultCurrency: Specifies the default currency if none specified. 
Defaults to \"USD\"\n          precisionStep:   Specifies the precisionStep for the TrieLong field used for the amount\n          providerClass:   Lets you plug in other exchange provider backend:\n                           solr.FileExchangeRateProvider is the default and takes one parameter:\n                             currencyConfig: name of an xml file holding exchange rates\n                           solr.OpenExchangeRatesOrgProvider uses rates from openexchangerates.org:\n                             ratesFileLocation: URL or path to rates JSON file (default latest.json on the web)\n                             refreshInterval: Number of minutes between each rates fetch (default: 1440, min: 60)\n    -->\n    <fieldType name=\"currency\" class=\"solr.CurrencyField\" precisionStep=\"8\" defaultCurrency=\"USD\" currencyConfig=\"currency.xml\" />\n\n\n\n    <!-- some examples for different languages (generally ordered by ISO code) -->\n\n    <!-- Arabic -->\n    <dynamicField name=\"*_txt_ar\" type=\"text_ar\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ar\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- for any non-arabic -->\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ar.txt\" />\n\n            <filter class=\"solr.ArabicNormalizationFilterFactory\"/>\n            <filter class=\"solr.ArabicStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Bulgarian -->\n    <dynamicField name=\"*_txt_bg\" type=\"text_bg\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_bg\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter 
class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_bg.txt\" />\n            <filter class=\"solr.BulgarianStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Catalan -->\n    <dynamicField name=\"*_txt_ca\" type=\"text_ca\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ca\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- removes l', etc -->\n            <filter class=\"solr.ElisionFilterFactory\" ignoreCase=\"true\" articles=\"lang/contractions_ca.txt\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ca.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Catalan\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- CJK bigram (see text_ja for a Japanese configuration using morphological analysis) -->\n    <dynamicField name=\"*_txt_cjk\" type=\"text_cjk\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_cjk\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- normalize width before bigram, as e.g. 
half-width dakuten combine  -->\n            <filter class=\"solr.CJKWidthFilterFactory\"/>\n            <!-- for any non-CJK -->\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.CJKBigramFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Czech -->\n    <dynamicField name=\"*_txt_cz\" type=\"text_cz\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_cz\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_cz.txt\" />\n            <filter class=\"solr.CzechStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Danish -->\n    <dynamicField name=\"*_txt_da\" type=\"text_da\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_da\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_da.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Danish\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- German -->\n    <dynamicField name=\"*_txt_de\" type=\"text_de\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_de\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_de.txt\" format=\"snowball\" />\n            <filter 
class=\"solr.GermanNormalizationFilterFactory\"/>\n            <filter class=\"solr.GermanLightStemFilterFactory\"/>\n            <!-- less aggressive: <filter class=\"solr.GermanMinimalStemFilterFactory\"/> -->\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"German2\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Greek -->\n    <dynamicField name=\"*_txt_el\" type=\"text_el\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_el\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- greek specific lowercase for sigma -->\n            <filter class=\"solr.GreekLowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"false\" words=\"lang/stopwords_el.txt\" />\n            <filter class=\"solr.GreekStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Spanish -->\n    <dynamicField name=\"*_txt_es\" type=\"text_es\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_es\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_es.txt\" format=\"snowball\" />\n            <filter class=\"solr.SpanishLightStemFilterFactory\"/>\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Spanish\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Basque -->\n    <dynamicField name=\"*_txt_eu\" type=\"text_eu\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_eu\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter 
class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_eu.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Basque\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Persian -->\n    <dynamicField name=\"*_txt_fa\" type=\"text_fa\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_fa\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <!-- for ZWNJ -->\n            <charFilter class=\"solr.PersianCharFilterFactory\"/>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.ArabicNormalizationFilterFactory\"/>\n            <filter class=\"solr.PersianNormalizationFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_fa.txt\" />\n        </analyzer>\n    </fieldType>\n\n    <!-- Finnish -->\n    <dynamicField name=\"*_txt_fi\" type=\"text_fi\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_fi\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_fi.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Finnish\"/>\n            <!-- less aggressive: <filter class=\"solr.FinnishLightStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- French -->\n    <dynamicField name=\"*_txt_fr\" type=\"text_fr\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_fr\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer 
class=\"solr.StandardTokenizerFactory\"/>\n            <!-- removes l', etc -->\n            <filter class=\"solr.ElisionFilterFactory\" ignoreCase=\"true\" articles=\"lang/contractions_fr.txt\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_fr.txt\" format=\"snowball\" />\n            <filter class=\"solr.FrenchLightStemFilterFactory\"/>\n            <!-- less aggressive: <filter class=\"solr.FrenchMinimalStemFilterFactory\"/> -->\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"French\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Irish -->\n    <dynamicField name=\"*_txt_ga\" type=\"text_ga\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ga\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- removes d', etc -->\n            <filter class=\"solr.ElisionFilterFactory\" ignoreCase=\"true\" articles=\"lang/contractions_ga.txt\"/>\n            <!-- removes n-, etc. position increments is intentionally false! 
-->\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/hyphenations_ga.txt\"/>\n            <filter class=\"solr.IrishLowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ga.txt\"/>\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Irish\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Galician -->\n    <dynamicField name=\"*_txt_gl\" type=\"text_gl\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_gl\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_gl.txt\" />\n            <filter class=\"solr.GalicianStemFilterFactory\"/>\n            <!-- less aggressive: <filter class=\"solr.GalicianMinimalStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Hindi -->\n    <dynamicField name=\"*_txt_hi\" type=\"text_hi\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_hi\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <!-- normalizes unicode representation -->\n            <filter class=\"solr.IndicNormalizationFilterFactory\"/>\n            <!-- normalizes variation in spelling -->\n            <filter class=\"solr.HindiNormalizationFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_hi.txt\" />\n            <filter class=\"solr.HindiStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Hungarian -->\n    <dynamicField name=\"*_txt_hu\" type=\"text_hu\"  indexed=\"true\"  stored=\"true\"/>\n 
   <fieldType name=\"text_hu\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_hu.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Hungarian\"/>\n            <!-- less aggressive: <filter class=\"solr.HungarianLightStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Armenian -->\n    <dynamicField name=\"*_txt_hy\" type=\"text_hy\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_hy\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_hy.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Armenian\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Indonesian -->\n    <dynamicField name=\"*_txt_id\" type=\"text_id\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_id\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_id.txt\" />\n            <!-- for a less aggressive approach (only inflectional suffixes), set stemDerivational to false -->\n            <filter class=\"solr.IndonesianStemFilterFactory\" stemDerivational=\"true\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Italian -->\n    <dynamicField name=\"*_txt_it\" type=\"text_it\"  indexed=\"true\"  stored=\"true\"/>\n    
<fieldType name=\"text_it\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <!-- removes l', etc -->\n            <filter class=\"solr.ElisionFilterFactory\" ignoreCase=\"true\" articles=\"lang/contractions_it.txt\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_it.txt\" format=\"snowball\" />\n            <filter class=\"solr.ItalianLightStemFilterFactory\"/>\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Italian\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Japanese using morphological analysis (see text_cjk for a configuration using bigramming)\n\n         NOTE: If you want to optimize search for precision, use default operator AND in your query\n         parser config with <solrQueryParser defaultOperator=\"AND\"/> further down in this file.  Use\n         OR if you would like to optimize for recall (default).\n    -->\n    <dynamicField name=\"*_txt_ja\" type=\"text_ja\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ja\" class=\"solr.TextField\" positionIncrementGap=\"100\" autoGeneratePhraseQueries=\"false\">\n        <analyzer>\n            <!-- Kuromoji Japanese morphological analyzer/tokenizer (JapaneseTokenizer)\n\n               Kuromoji has a search mode (default) that does segmentation useful for search.  
A heuristic\n               is used to segment compounds into its parts and the compound itself is kept as synonym.\n\n               Valid values for attribute mode are:\n                  normal: regular segmentation\n                  search: segmentation useful for search with synonyms compounds (default)\n                extended: same as search mode, but unigrams unknown words (experimental)\n\n               For some applications it might be good to use search mode for indexing and normal mode for\n               queries to reduce recall and prevent parts of compounds from being matched and highlighted.\n               Use <analyzer type=\"index\"> and <analyzer type=\"query\"> for this and mode normal in query.\n\n               Kuromoji also has a convenient user dictionary feature that allows overriding the statistical\n               model with your own entries for segmentation, part-of-speech tags and readings without a need\n               to specify weights.  Notice that user dictionaries have not been subject to extensive testing.\n\n               User dictionary attributes are:\n                         userDictionary: user dictionary filename\n                 userDictionaryEncoding: user dictionary encoding (default is UTF-8)\n\n               See lang/userdict_ja.txt for a sample user dictionary file.\n\n               Punctuation characters are discarded by default.  
Use discardPunctuation=\"false\" to keep them.\n\n               See http://wiki.apache.org/solr/JapaneseLanguageSupport for more on Japanese language support.\n            -->\n            <tokenizer class=\"solr.JapaneseTokenizerFactory\" mode=\"search\"/>\n            <!--<tokenizer class=\"solr.JapaneseTokenizerFactory\" mode=\"search\" userDictionary=\"lang/userdict_ja.txt\"/>-->\n            <!-- Reduces inflected verbs and adjectives to their base/dictionary forms  -->\n            <filter class=\"solr.JapaneseBaseFormFilterFactory\"/>\n            <!-- Removes tokens with certain part-of-speech tags -->\n            <filter class=\"solr.JapanesePartOfSpeechStopFilterFactory\" tags=\"lang/stoptags_ja.txt\" />\n            <!-- Normalizes full-width romaji to half-width and half-width kana to full-width (Unicode NFKC subset) -->\n            <filter class=\"solr.CJKWidthFilterFactory\"/>\n            <!-- Removes common tokens typically not useful for search, but have a negative effect on ranking -->\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ja.txt\" />\n            <!-- Normalizes common katakana spelling variations by removing any last long sound character (U+30FC) -->\n            <filter class=\"solr.JapaneseKatakanaStemFilterFactory\" minimumLength=\"4\"/>\n            <!-- Lower-cases romaji characters -->\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Latvian -->\n    <dynamicField name=\"*_txt_lv\" type=\"text_lv\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_lv\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_lv.txt\" />\n            <filter 
class=\"solr.LatvianStemFilterFactory\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Dutch -->\n    <dynamicField name=\"*_txt_nl\" type=\"text_nl\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_nl\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_nl.txt\" format=\"snowball\" />\n            <filter class=\"solr.StemmerOverrideFilterFactory\" dictionary=\"lang/stemdict_nl.txt\" ignoreCase=\"false\"/>\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Dutch\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Norwegian -->\n    <dynamicField name=\"*_txt_no\" type=\"text_no\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_no\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_no.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Norwegian\"/>\n            <!-- less aggressive: <filter class=\"solr.NorwegianLightStemFilterFactory\"/> -->\n            <!-- singular/plural: <filter class=\"solr.NorwegianMinimalStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Portuguese -->\n    <dynamicField name=\"*_txt_pt\" type=\"text_pt\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_pt\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter 
class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_pt.txt\" format=\"snowball\" />\n            <filter class=\"solr.PortugueseLightStemFilterFactory\"/>\n            <!-- less aggressive: <filter class=\"solr.PortugueseMinimalStemFilterFactory\"/> -->\n            <!-- more aggressive: <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Portuguese\"/> -->\n            <!-- most aggressive: <filter class=\"solr.PortugueseStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Romanian -->\n    <dynamicField name=\"*_txt_ro\" type=\"text_ro\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ro\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ro.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Romanian\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Russian -->\n    <dynamicField name=\"*_txt_ru\" type=\"text_ru\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_ru\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_ru.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Russian\"/>\n            <!-- less aggressive: <filter class=\"solr.RussianLightStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Swedish -->\n    <dynamicField name=\"*_txt_sv\" type=\"text_sv\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_sv\" class=\"solr.TextField\" 
positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_sv.txt\" format=\"snowball\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Swedish\"/>\n            <!-- less aggressive: <filter class=\"solr.SwedishLightStemFilterFactory\"/> -->\n        </analyzer>\n    </fieldType>\n\n    <!-- Thai -->\n    <dynamicField name=\"*_txt_th\" type=\"text_th\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_th\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.ThaiTokenizerFactory\"/>\n            <filter class=\"solr.LowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"true\" words=\"lang/stopwords_th.txt\" />\n        </analyzer>\n    </fieldType>\n\n    <!-- Turkish -->\n    <dynamicField name=\"*_txt_tr\" type=\"text_tr\"  indexed=\"true\"  stored=\"true\"/>\n    <fieldType name=\"text_tr\" class=\"solr.TextField\" positionIncrementGap=\"100\">\n        <analyzer>\n            <tokenizer class=\"solr.StandardTokenizerFactory\"/>\n            <filter class=\"solr.TurkishLowerCaseFilterFactory\"/>\n            <filter class=\"solr.StopFilterFactory\" ignoreCase=\"false\" words=\"lang/stopwords_tr.txt\" />\n            <filter class=\"solr.SnowballPorterFilterFactory\" language=\"Turkish\"/>\n        </analyzer>\n    </fieldType>\n\n    <!-- Similarity is the scoring routine for each document vs. 
a query.\n       A custom Similarity or SimilarityFactory may be specified here, but\n       the default is fine for most applications.\n       For more info: http://wiki.apache.org/solr/SchemaXml#Similarity\n    -->\n    <!--\n     <similarity class=\"com.example.solr.CustomSimilarityFactory\">\n       <str name=\"paramkey\">param value</str>\n     </similarity>\n    -->\n\n</schema>\n"
  },
  {
    "path": "test_haystack/solr_tests/server/confdir/solrconfig.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<!--\n     For more details about configurations options that may appear in\n     this file, see http://wiki.apache.org/solr/SolrConfigXml.\n-->\n<config>\n  <!-- In all configuration below, a prefix of \"solr.\" for class names\n       is an alias that causes solr to search appropriate packages,\n       including org.apache.solr.(search|update|request|core|analysis)\n\n       You may also specify a fully qualified Java classname if you\n       have your own custom plugins.\n    -->\n\n  <!-- Controls what version of Lucene various components of Solr\n       adhere to.  Generally, you want to use the latest version to\n       get all bug fixes and improvements. 
It is highly recommended\n       that you fully re-index after changing this setting as it can\n       affect both how text is indexed and queried.\n  -->\n  <luceneMatchVersion>6.5.0</luceneMatchVersion>\n  <schemaFactory class=\"ClassicIndexSchemaFactory\"/>\n\n  <!-- <lib/> directives can be used to instruct Solr to load any Jars\n       identified and use them to resolve any \"plugins\" specified in\n       your solrconfig.xml or schema.xml (ie: Analyzers, Request\n       Handlers, etc...).\n\n       All directories and paths are resolved relative to the\n       instanceDir.\n\n       Please note that <lib/> directives are processed in the order\n       that they appear in your solrconfig.xml file, and are \"stacked\"\n       on top of each other when building a ClassLoader - so if you have\n       plugin jars with dependencies on other jars, the \"lower level\"\n       dependency jars should be loaded first.\n\n       If a \"./lib\" directory exists in your instanceDir, all files\n       found in it are included as if you had used the following\n       syntax...\n\n              <lib dir=\"./lib\" />\n    -->\n\n  <!-- A 'dir' option by itself adds any files found in the directory\n       to the classpath, this is useful for including all jars in a\n       directory.\n\n       When a 'regex' is specified in addition to a 'dir', only the\n       files in that directory which completely match the regex\n       (anchored on both ends) will be included.\n\n       If a 'dir' option (with or without a regex) is used and nothing\n       is found that matches, a warning will be logged.\n\n       The examples below can be used to load some solr-contribs along\n       with their external dependencies.\n    -->\n  <lib dir=\"${solr.install.dir:../../../..}/contrib/extraction/lib\" regex=\".*\\.jar\" />\n  <lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-cell-\\d.*\\.jar\" />\n\n  <lib dir=\"${solr.install.dir:../../../..}/contrib/clustering/lib/\" 
regex=\".*\\.jar\" />\n  <lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-clustering-\\d.*\\.jar\" />\n\n  <lib dir=\"${solr.install.dir:../../../..}/contrib/langid/lib/\" regex=\".*\\.jar\" />\n  <lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-langid-\\d.*\\.jar\" />\n\n  <lib dir=\"${solr.install.dir:../../../..}/contrib/velocity/lib\" regex=\".*\\.jar\" />\n  <lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-velocity-\\d.*\\.jar\" />\n  <!-- an exact 'path' can be used instead of a 'dir' to specify a\n       specific jar file.  This will cause a serious error to be logged\n       if it can't be loaded.\n    -->\n  <!--\n     <lib path=\"../a-jar-that-does-not-exist.jar\" />\n  -->\n\n  <!-- Data Directory\n\n       Used to specify an alternate directory to hold all index data\n       other than the default ./data under the Solr home.  If\n       replication is in use, this should match the replication\n       configuration.\n    -->\n  <dataDir>${solr.data.dir:}</dataDir>\n\n\n  <!-- The DirectoryFactory to use for indexes.\n\n       solr.StandardDirectoryFactory is filesystem\n       based and tries to pick the best implementation for the current\n       JVM and platform.  
solr.NRTCachingDirectoryFactory, the default,\n       wraps solr.StandardDirectoryFactory and caches small files in memory\n       for better NRT performance.\n\n       One can force a particular implementation via solr.MMapDirectoryFactory,\n       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.\n\n       solr.RAMDirectoryFactory is memory based, not\n       persistent, and doesn't work with replication.\n    -->\n  <directoryFactory name=\"DirectoryFactory\"\n                    class=\"${solr.directoryFactory:solr.NRTCachingDirectoryFactory}\"/>\n\n  <!-- The CodecFactory for defining the format of the inverted index.\n       The default implementation is SchemaCodecFactory, which is the official Lucene\n       index format, but hooks into the schema to provide per-field customization of\n       the postings lists and per-document values in the fieldType element\n       (postingsFormat/docValuesFormat). Note that most of the alternative implementations\n       are experimental, so if you choose to customize the index format, it's a good\n       idea to convert back to the official format e.g. 
via IndexWriter.addIndexes(IndexReader)\n       before upgrading to a newer version to avoid unnecessary reindexing.\n       A \"compressionMode\" string element can be added to <codecFactory> to choose\n       between the existing compression modes in the default codec: \"BEST_SPEED\" (default)\n       or \"BEST_COMPRESSION\".\n  -->\n  <codecFactory class=\"solr.SchemaCodecFactory\"/>\n\n  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n       Index Config - These settings control low-level behavior of indexing\n       Most example settings here show the default value, but are commented\n       out, to more easily see where customizations have been made.\n\n       Note: This replaces <indexDefaults> and <mainIndex> from older versions\n       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->\n  <indexConfig>\n    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a\n         LimitTokenCountFilterFactory in your fieldType definition. E.g.\n     <filter class=\"solr.LimitTokenCountFilterFactory\" maxTokenCount=\"10000\"/>\n    -->\n    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->\n    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->\n\n    <!-- Expert: Enabling compound file will use less files for the index,\n         using fewer file descriptors on the expense of performance decrease.\n         Default in Lucene is \"true\". Default in Solr is \"false\" (since 3.6) -->\n    <!-- <useCompoundFile>false</useCompoundFile> -->\n\n    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene\n         indexing for buffering added documents and deletions before they are\n         flushed to the Directory.\n         maxBufferedDocs sets a limit on the number of documents buffered\n         before flushing.\n         If both ramBufferSizeMB and maxBufferedDocs is set, then\n         Lucene will flush based on whichever limit is hit first.  
-->\n    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->\n    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->\n\n    <!-- Expert: Merge Policy\n         The Merge Policy in Lucene controls how merging of segments is done.\n         The default since Solr/Lucene 3.3 is TieredMergePolicy.\n         The default since Lucene 2.3 was the LogByteSizeMergePolicy,\n         Even older versions of Lucene used LogDocMergePolicy.\n      -->\n    <!--\n        <mergePolicyFactory class=\"org.apache.solr.index.TieredMergePolicyFactory\">\n          <int name=\"maxMergeAtOnce\">10</int>\n          <int name=\"segmentsPerTier\">10</int>\n          <double name=\"noCFSRatio\">0.1</double>\n        </mergePolicyFactory>\n      -->\n\n    <!-- Expert: Merge Scheduler\n         The Merge Scheduler in Lucene controls how merges are\n         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)\n         can perform merges in the background using separate threads.\n         The SerialMergeScheduler (Lucene 2.2 default) does not.\n     -->\n    <!--\n       <mergeScheduler class=\"org.apache.lucene.index.ConcurrentMergeScheduler\"/>\n       -->\n\n    <!-- LockFactory\n\n         This option specifies which Lucene LockFactory implementation\n         to use.\n\n         single = SingleInstanceLockFactory - suggested for a\n                  read-only index or when there is no possibility of\n                  another process trying to modify the index.\n         native = NativeFSLockFactory - uses OS native file locking.\n                  Do not use when multiple solr webapps in the same\n                  JVM are attempting to share a single index.\n         simple = SimpleFSLockFactory  - uses a plain file for locking\n\n         Defaults: 'native' is default for Solr3.6 and later, otherwise\n                   'simple' is the default\n\n         More details on the nuances of each LockFactory...\n         http://wiki.apache.org/lucene-java/AvailableLockFactories\n    -->\n 
   <lockType>${solr.lock.type:native}</lockType>\n\n    <!-- Commit Deletion Policy\n         Custom deletion policies can be specified here. The class must\n         implement org.apache.lucene.index.IndexDeletionPolicy.\n\n         The default Solr IndexDeletionPolicy implementation supports\n         deleting index commit points on number of commits, age of\n         commit point and optimized status.\n\n         The latest commit point should always be preserved regardless\n         of the criteria.\n    -->\n    <!--\n    <deletionPolicy class=\"solr.SolrDeletionPolicy\">\n    -->\n    <!-- The number of commit points to be kept -->\n    <!-- <str name=\"maxCommitsToKeep\">1</str> -->\n    <!-- The number of optimized commit points to be kept -->\n    <!-- <str name=\"maxOptimizedCommitsToKeep\">0</str> -->\n    <!--\n        Delete all commit points once they have reached the given age.\n        Supports DateMathParser syntax e.g.\n      -->\n    <!--\n       <str name=\"maxCommitAge\">30MINUTES</str>\n       <str name=\"maxCommitAge\">1DAY</str>\n    -->\n    <!--\n    </deletionPolicy>\n    -->\n\n    <!-- Lucene Infostream\n\n         To aid in advanced debugging, Lucene provides an \"InfoStream\"\n         of detailed information when indexing.\n\n         Setting The value to true will instruct the underlying Lucene\n         IndexWriter to write its debugging info the specified file\n      -->\n    <!-- <infoStream file=\"INFOSTREAM.txt\">false</infoStream> -->\n  </indexConfig>\n\n\n  <!-- JMX\n\n       This example enables JMX if and only if an existing MBeanServer\n       is found, use this if you want to configure JMX through JVM\n       parameters. 
Remove this to disable exposing Solr configuration\n       and statistics to JMX.\n\n       For more details see http://wiki.apache.org/solr/SolrJmx\n    -->\n  <jmx />\n  <!-- If you want to connect to a particular server, specify the\n       agentId\n    -->\n  <!-- <jmx agentId=\"myAgent\" /> -->\n  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->\n  <!-- <jmx serviceUrl=\"service:jmx:rmi:///jndi/rmi://localhost:9999/solr\"/>\n    -->\n\n  <!-- The default high-performance update handler -->\n  <updateHandler class=\"solr.DirectUpdateHandler2\">\n\n    <!-- Enables a transaction log, used for real-time get, durability, and\n         and solr cloud replica recovery.  The log can grow as big as\n         uncommitted changes to the index, so use of a hard autoCommit\n         is recommended (see below).\n         \"dir\" - the target directory for transaction logs, defaults to the\n                solr data directory.\n         \"numVersionBuckets\" - sets the number of buckets used to keep\n                track of max version values when checking for re-ordered\n                updates; increase this value to reduce the cost of\n                synchronizing access to version buckets during high-volume\n                indexing, this requires 8 bytes (long) * numVersionBuckets\n                of heap space per Solr core.\n    -->\n    <updateLog>\n      <str name=\"dir\">${solr.ulog.dir:}</str>\n      <int name=\"numVersionBuckets\">${solr.ulog.numVersionBuckets:65536}</int>\n    </updateLog>\n\n    <!-- AutoCommit\n\n         Perform a hard commit automatically under certain conditions.\n         Instead of enabling autoCommit, consider using \"commitWithin\"\n         when adding documents.\n\n         http://wiki.apache.org/solr/UpdateXmlMessages\n\n         maxDocs - Maximum number of documents to add since the last\n                   commit before automatically triggering a new commit.\n\n         maxTime - Maximum amount of time in 
ms that is allowed to pass\n                   since a document was added before automatically\n                   triggering a new commit.\n         openSearcher - if false, the commit causes recent index changes\n           to be flushed to stable storage, but does not cause a new\n           searcher to be opened to make those changes visible.\n\n         If the updateLog is enabled, then it's highly recommended to\n         have some sort of hard autoCommit to limit the log size.\n      -->\n    <autoCommit>\n      <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>\n      <openSearcher>false</openSearcher>\n    </autoCommit>\n\n    <!-- softAutoCommit is like autoCommit except it causes a\n         'soft' commit which only ensures that changes are visible\n         but does not ensure that data is synced to disk.  This is\n         faster and more near-realtime friendly than a hard commit.\n      -->\n\n    <autoSoftCommit>\n      <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>\n    </autoSoftCommit>\n\n    <!-- Update Related Event Listeners\n\n         Various IndexWriter related events can trigger Listeners to\n         take actions.\n\n         postCommit - fired after every commit or optimize command\n         postOptimize - fired after every optimize command\n      -->\n    <!-- The RunExecutableListener executes an external command from a\n         hook such as postCommit or postOptimize.\n\n         exe - the name of the executable to run\n         dir - dir to use as the current working directory. (default=\".\")\n         wait - the calling thread waits until the executable returns.\n                (default=\"true\")\n         args - the arguments to pass to the program.  (default is none)\n         env - environment variables to set.  
(default is none)\n      -->\n    <!-- This example shows how RunExecutableListener could be used\n         with the script based replication...\n         http://wiki.apache.org/solr/CollectionDistribution\n      -->\n    <!--\n       <listener event=\"postCommit\" class=\"solr.RunExecutableListener\">\n         <str name=\"exe\">solr/bin/snapshooter</str>\n         <str name=\"dir\">.</str>\n         <bool name=\"wait\">true</bool>\n         <arr name=\"args\"> <str>arg1</str> <str>arg2</str> </arr>\n         <arr name=\"env\"> <str>MYVAR=val1</str> </arr>\n       </listener>\n      -->\n\n  </updateHandler>\n\n  <!-- IndexReaderFactory\n\n       Use the following format to specify a custom IndexReaderFactory,\n       which allows for alternate IndexReader implementations.\n\n       ** Experimental Feature **\n\n       Please note - Using a custom IndexReaderFactory may prevent\n       certain other features from working. The API to\n       IndexReaderFactory may change without warning or may even be\n       removed from future releases if the problems cannot be\n       resolved.\n\n\n       ** Features that may not work with custom IndexReaderFactory **\n\n       The ReplicationHandler assumes a disk-resident index. Using a\n       custom IndexReader implementation may cause incompatibility\n       with ReplicationHandler and may cause replication to not work\n       correctly. 
See SOLR-1366 for details.\n\n    -->\n  <!--\n  <indexReaderFactory name=\"IndexReaderFactory\" class=\"package.class\">\n    <str name=\"someArg\">Some Value</str>\n  </indexReaderFactory >\n  -->\n\n  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n       Query section - these settings control query time things like caches\n       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->\n  <query>\n    <!-- Max Boolean Clauses\n\n         Maximum number of clauses in each BooleanQuery,  an exception\n         is thrown if exceeded.\n\n         ** WARNING **\n\n         This option actually modifies a global Lucene property that\n         will affect all SolrCores.  If multiple solrconfig.xml files\n         disagree on this property, the value at any given moment will\n         be based on the last SolrCore to be initialized.\n\n      -->\n    <maxBooleanClauses>1024</maxBooleanClauses>\n\n\n    <!-- Solr Internal Query Caches\n\n         There are two implementations of cache available for Solr,\n         LRUCache, based on a synchronized LinkedHashMap, and\n         FastLRUCache, based on a ConcurrentHashMap.\n\n         FastLRUCache has faster gets and slower puts in single\n         threaded operation and thus is generally faster than LRUCache\n         when the hit ratio of the cache is high (> 75%), and may be\n         faster under other scenarios on multi-cpu systems.\n    -->\n\n    <!-- Filter Cache\n\n         Cache used by SolrIndexSearcher for filters (DocSets),\n         unordered sets of *all* documents that match a query.  When a\n         new searcher is opened, its caches may be prepopulated or\n         \"autowarmed\" using data from caches in the old searcher.\n         autowarmCount is the number of items to prepopulate.  
For\n         LRUCache, the autowarmed items will be the most recently\n         accessed items.\n\n         Parameters:\n           class - the SolrCache implementation LRUCache or\n               (LRUCache or FastLRUCache)\n           size - the maximum number of entries in the cache\n           initialSize - the initial capacity (number of entries) of\n               the cache.  (see java.util.HashMap)\n           autowarmCount - the number of entries to prepopulate from\n               and old cache.\n           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed\n                      to occupy. Note that when this option is specified, the size\n                      and initialSize parameters are ignored.\n      -->\n    <filterCache class=\"solr.FastLRUCache\"\n                 size=\"512\"\n                 initialSize=\"512\"\n                 autowarmCount=\"0\"/>\n\n    <!-- Query Result Cache\n\n         Caches results of searches - ordered lists of document ids\n         (DocList) based on a query, a sort, and the range of documents requested.\n         Additional supported parameter by LRUCache:\n            maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed\n                       to occupy\n      -->\n    <queryResultCache class=\"solr.LRUCache\"\n                      size=\"512\"\n                      initialSize=\"512\"\n                      autowarmCount=\"0\"/>\n\n    <!-- Document Cache\n\n         Caches Lucene Document objects (the stored fields for each\n         document).  
Since Lucene internal document ids are transient,\n         this cache will not be autowarmed.\n      -->\n    <documentCache class=\"solr.LRUCache\"\n                   size=\"512\"\n                   initialSize=\"512\"\n                   autowarmCount=\"0\"/>\n\n    <!-- custom cache currently used by block join -->\n    <cache name=\"perSegFilter\"\n           class=\"solr.search.LRUCache\"\n           size=\"10\"\n           initialSize=\"0\"\n           autowarmCount=\"10\"\n           regenerator=\"solr.NoOpRegenerator\" />\n\n    <!-- Field Value Cache\n\n         Cache used to hold field values that are quickly accessible\n         by document id.  The fieldValueCache is created by default\n         even if not configured here.\n      -->\n    <!--\n       <fieldValueCache class=\"solr.FastLRUCache\"\n                        size=\"512\"\n                        autowarmCount=\"128\"\n                        showItems=\"32\" />\n      -->\n\n    <!-- Custom Cache\n\n         Example of a generic cache.  These caches may be accessed by\n         name through SolrIndexSearcher.getCache(),cacheLookup(), and\n         cacheInsert().  The purpose is to enable easy caching of\n         user/application level data.  The regenerator argument should\n         be specified as an implementation of solr.CacheRegenerator\n         if autowarming is desired.\n      -->\n    <!--\n       <cache name=\"myUserCache\"\n              class=\"solr.LRUCache\"\n              size=\"4096\"\n              initialSize=\"1024\"\n              autowarmCount=\"1024\"\n              regenerator=\"com.mycompany.MyRegenerator\"\n              />\n      -->\n\n\n    <!-- Lazy Field Loading\n\n         If true, stored fields that are not requested will be loaded\n         lazily.  
This can result in a significant speed improvement\n         if the usual case is to not load all stored fields,\n         especially if the skipped fields are large compressed text\n         fields.\n    -->\n    <enableLazyFieldLoading>true</enableLazyFieldLoading>\n\n    <!-- Use Filter For Sorted Query\n\n         A possible optimization that attempts to use a filter to\n         satisfy a search.  If the requested sort does not include\n         score, then the filterCache will be checked for a filter\n         matching the query. If found, the filter will be used as the\n         source of document ids, and then the sort will be applied to\n         that.\n\n         For most situations, this will not be useful unless you\n         frequently get the same search repeatedly with different sort\n         options, and none of them ever use \"score\"\n      -->\n    <!--\n       <useFilterForSortedQuery>true</useFilterForSortedQuery>\n      -->\n\n    <!-- Result Window Size\n\n         An optimization for use with the queryResultCache.  When a search\n         is requested, a superset of the requested number of document ids\n         are collected.  For example, if a search for a particular query\n         requests matching documents 10 through 19, and queryWindowSize is 50,\n         then documents 0 through 49 will be collected and cached.  Any further\n         requests in that range can be satisfied via the cache.\n      -->\n    <queryResultWindowSize>20</queryResultWindowSize>\n\n    <!-- Maximum number of documents to cache for any entry in the\n         queryResultCache.\n      -->\n    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>\n\n    <!-- Query Related Event Listeners\n\n         Various IndexSearcher related events can trigger Listeners to\n         take actions.\n\n         newSearcher - fired whenever a new searcher is being prepared\n         and there is a current searcher handling requests (aka\n         registered).  
It can be used to prime certain caches to\n         prevent long request times for certain requests.\n\n         firstSearcher - fired whenever a new searcher is being\n         prepared but there is no current registered searcher to handle\n         requests or to gain autowarming data from.\n\n\n      -->\n    <!-- QuerySenderListener takes an array of NamedList and executes a\n         local query request for each NamedList in sequence.\n      -->\n    <listener event=\"newSearcher\" class=\"solr.QuerySenderListener\">\n      <arr name=\"queries\">\n        <!--\n           <lst><str name=\"q\">solr</str><str name=\"sort\">price asc</str></lst>\n           <lst><str name=\"q\">rocks</str><str name=\"sort\">weight asc</str></lst>\n          -->\n      </arr>\n    </listener>\n    <listener event=\"firstSearcher\" class=\"solr.QuerySenderListener\">\n      <arr name=\"queries\">\n        <!--\n        <lst>\n          <str name=\"q\">static firstSearcher warming in solrconfig.xml</str>\n        </lst>\n        -->\n      </arr>\n    </listener>\n\n    <!-- Use Cold Searcher\n\n         If a search request comes in and there is no current\n         registered searcher, then immediately register the still\n         warming searcher and use it.  
If \"false\" then all requests\n         will block until the first searcher is done warming.\n      -->\n    <useColdSearcher>false</useColdSearcher>\n\n  </query>\n\n\n  <!-- Request Dispatcher\n\n       This section contains instructions for how the SolrDispatchFilter\n       should behave when processing requests for this SolrCore.\n\n       handleSelect is a legacy option that affects the behavior of requests\n       such as /select?qt=XXX\n\n       handleSelect=\"true\" will cause the SolrDispatchFilter to process\n       the request and dispatch the query to a handler specified by the\n       \"qt\" param, assuming \"/select\" isn't already registered.\n\n       handleSelect=\"false\" will cause the SolrDispatchFilter to\n       ignore \"/select\" requests, resulting in a 404 unless a handler\n       is explicitly registered with the name \"/select\"\n\n       handleSelect=\"true\" is not recommended for new users, but is the default\n       for backwards compatibility\n    -->\n  <requestDispatcher handleSelect=\"false\" >\n    <!-- Request Parsing\n\n         These settings indicate how Solr Requests may be parsed, and\n         what restrictions may be placed on the ContentStreams from\n         those requests\n\n         enableRemoteStreaming - enables use of the stream.file\n         and stream.url parameters for specifying remote streams.\n\n         multipartUploadLimitInKB - specifies the max size (in KiB) of\n         Multipart File Uploads that Solr will allow in a Request.\n\n         formdataUploadLimitInKB - specifies the max size (in KiB) of\n         form data (application/x-www-form-urlencoded) sent via\n         POST. You can use POST to pass request parameters not\n         fitting into the URL.\n\n         addHttpRequestToContext - if set to true, it will instruct\n         the requestParsers to include the original HttpServletRequest\n         object in the context map of the SolrQueryRequest under the\n         key \"httpRequest\". 
It will not be used by any of the existing\n         Solr components, but may be useful when developing custom\n         plugins.\n\n         *** WARNING ***\n         The settings below authorize Solr to fetch remote files, You\n         should make sure your system has some authentication before\n         using enableRemoteStreaming=\"true\"\n\n      -->\n    <requestParsers enableRemoteStreaming=\"true\"\n                    multipartUploadLimitInKB=\"2048000\"\n                    formdataUploadLimitInKB=\"2048\"\n                    addHttpRequestToContext=\"false\"/>\n\n    <!-- HTTP Caching\n\n         Set HTTP caching related parameters (for proxy caches and clients).\n\n         The options below instruct Solr not to output any HTTP Caching\n         related headers\n      -->\n    <httpCaching never304=\"true\" />\n    <!-- If you include a <cacheControl> directive, it will be used to\n         generate a Cache-Control header (as well as an Expires header\n         if the value contains \"max-age=\")\n\n         By default, no Cache-Control header is generated.\n\n         You can use the <cacheControl> option even if you have set\n         never304=\"true\"\n      -->\n    <!--\n       <httpCaching never304=\"true\" >\n         <cacheControl>max-age=30, public</cacheControl>\n       </httpCaching>\n      -->\n    <!-- To enable Solr to respond with automatically generated HTTP\n         Caching headers, and to response to Cache Validation requests\n         correctly, set the value of never304=\"false\"\n\n         This will cause Solr to generate Last-Modified and ETag\n         headers based on the properties of the Index.\n\n         The following options can also be specified to affect the\n         values of these headers...\n\n         lastModFrom - the default value is \"openTime\" which means the\n         Last-Modified value (and validation against If-Modified-Since\n         requests) will all be relative to when the current Searcher\n         
was opened.  You can change it to lastModFrom=\"dirLastMod\" if\n         you want the value to exactly correspond to when the physical\n         index was last modified.\n\n         etagSeed=\"...\" is an option you can change to force the ETag\n         header (and validation against If-None-Match requests) to be\n         different even if the index has not changed (ie: when making\n         significant changes to your config file)\n\n         (lastModifiedFrom and etagSeed are both ignored if you use\n         the never304=\"true\" option)\n      -->\n    <!--\n       <httpCaching lastModifiedFrom=\"openTime\"\n                    etagSeed=\"Solr\">\n         <cacheControl>max-age=30, public</cacheControl>\n       </httpCaching>\n      -->\n  </requestDispatcher>\n\n  <!-- Request Handlers\n\n       http://wiki.apache.org/solr/SolrRequestHandler\n\n       Incoming queries will be dispatched to a specific handler by name\n       based on the path specified in the request.\n\n       Legacy behavior: If the request path uses \"/select\" but no Request\n       Handler has that name, and if handleSelect=\"true\" has been specified in\n       the requestDispatcher, then the Request Handler is dispatched based on\n       the qt parameter.  
Handlers without a leading '/' are accessed this way\n       like so: http://host/app/[core/]select?qt=name  If no qt is\n       given, then the requestHandler that declares default=\"true\" will be\n       used or the one named \"standard\".\n\n       If a Request Handler is declared with startup=\"lazy\", then it will\n       not be initialized until the first request that uses it.\n\n    -->\n  <!-- SearchHandler\n\n       http://wiki.apache.org/solr/SearchHandler\n\n       For processing Search Queries, the primary Request Handler\n       provided with Solr is \"SearchHandler\" It delegates to a sequent\n       of SearchComponents (see below) and supports distributed\n       queries across multiple shards\n    -->\n  <requestHandler name=\"/select\" class=\"solr.SearchHandler\">\n    <!-- default values for query parameters can be specified, these\n         will be overridden by parameters in the request\n      -->\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">explicit</str>\n      <int name=\"rows\">10</int>\n      <!-- <str name=\"df\">text</str> -->\n      <str name=\"spellcheck.dictionary\">default</str>\n      <str name=\"spellcheck\">on</str>\n      <str name=\"spellcheck.extendedResults\">true</str>\n      <str name=\"spellcheck.count\">10</str>\n      <str name=\"spellcheck.alternativeTermCount\">5</str>\n      <str name=\"spellcheck.maxResultsForSuggest\">5</str>\n      <str name=\"spellcheck.collate\">true</str>\n      <str name=\"spellcheck.collateExtendedResults\">true</str>\n      <str name=\"spellcheck.maxCollationTries\">10</str>\n      <str name=\"spellcheck.maxCollations\">5</str>\n    </lst>\n    <!-- In addition to defaults, \"appends\" params can be specified\n         to identify values which should be appended to the list of\n         multi-val params from the query (or the existing \"defaults\").\n      -->\n    <!-- In this example, the param \"fq=instock:true\" would be appended to\n         any query time fq params the 
user may specify, as a mechanism for\n         partitioning the index, independent of any user selected filtering\n         that may also be desired (perhaps as a result of faceted searching).\n\n         NOTE: there is *absolutely* nothing a client can do to prevent these\n         \"appends\" values from being used, so don't use this mechanism\n         unless you are sure you always want it.\n      -->\n    <!--\n       <lst name=\"appends\">\n         <str name=\"fq\">inStock:true</str>\n       </lst>\n      -->\n    <!-- \"invariants\" are a way of letting the Solr maintainer lock down\n         the options available to Solr clients.  Any params values\n         specified here are used regardless of what values may be specified\n         in either the query, the \"defaults\", or the \"appends\" params.\n\n         In this example, the facet.field and facet.query params would\n         be fixed, limiting the facets clients can use.  Faceting is\n         not turned on by default - but if the client does specify\n         facet=true in the request, these are the only facets they\n         will be able to see counts for; regardless of what other\n         facet.field or facet.query params they may specify.\n\n         NOTE: there is *absolutely* nothing a client can do to prevent these\n         \"invariants\" values from being used, so don't use this mechanism\n         unless you are sure you always want it.\n      -->\n    <!--\n       <lst name=\"invariants\">\n         <str name=\"facet.field\">cat</str>\n         <str name=\"facet.field\">manu_exact</str>\n         <str name=\"facet.query\">price:[* TO 500]</str>\n         <str name=\"facet.query\">price:[500 TO *]</str>\n       </lst>\n      -->\n    <!-- If the default list of SearchComponents is not desired, that\n         list can either be overridden completely, or components can be\n         prepended or appended to the default list.  
(see below)\n      -->\n    <!--\n       <arr name=\"components\">\n         <str>nameOfCustomComponent1</str>\n         <str>nameOfCustomComponent2</str>\n       </arr>\n      -->\n      <arr name=\"last-components\">\n        <str>spellcheck</str>\n      </arr>\n  </requestHandler>\n\n  <!-- A request handler that returns indented JSON by default -->\n  <requestHandler name=\"/query\" class=\"solr.SearchHandler\">\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">explicit</str>\n      <str name=\"wt\">json</str>\n      <str name=\"indent\">true</str>\n    </lst>\n  </requestHandler>\n\n  <requestHandler name=\"/mlt\" class=\"solr.MoreLikeThisHandler\" />\n  <!-- A Robust Example\n\n       This example SearchHandler declaration shows off usage of the\n       SearchHandler with many defaults declared\n\n       Note that multiple instances of the same Request Handler\n       (SearchHandler) can be registered multiple times with different\n       names (and different init parameters)\n    -->\n  <requestHandler name=\"/browse\" class=\"solr.SearchHandler\" useParams=\"query,facets,velocity,browse\">\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">explicit</str>\n    </lst>\n  </requestHandler>\n\n  <initParams path=\"/update/**,/query,/select,/tvrh,/elevate,/spell,/browse\">\n    <lst name=\"defaults\">\n      <str name=\"df\">text</str>\n    </lst>\n  </initParams>\n\n  <initParams path=\"/update/**\">\n    <lst name=\"defaults\">\n      <str name=\"update.chain\">add-unknown-fields-to-the-schema</str>\n    </lst>\n  </initParams>\n\n  <!-- ping/healthcheck -->\n  <requestHandler name=\"/admin/ping\" class=\"solr.PingRequestHandler\">\n    <lst name=\"invariants\">\n      <str name=\"q\">solrpingquery</str>\n    </lst>\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">all</str>\n    </lst>\n    <!-- An optional feature of the PingRequestHandler is to configure the\n         handler with a \"healthcheckFile\" which can be used to 
enable/disable\n         the PingRequestHandler.\n         relative paths are resolved against the data dir\n      -->\n    <!-- <str name=\"healthcheckFile\">server-enabled.txt</str> -->\n  </requestHandler>\n\n  <!-- Solr Cell Update Request Handler\n\n       http://wiki.apache.org/solr/ExtractingRequestHandler\n\n    -->\n  <requestHandler name=\"/update/extract\"\n                  startup=\"lazy\"\n                  class=\"solr.extraction.ExtractingRequestHandler\" >\n    <lst name=\"defaults\">\n      <str name=\"lowernames\">true</str>\n      <str name=\"fmap.meta\">ignored_</str>\n      <str name=\"fmap.content\">text</str>\n    </lst>\n  </requestHandler>\n\n  <!-- Search Components\n\n       Search components are registered to SolrCore and used by\n       instances of SearchHandler (which can access them by name)\n\n       By default, the following components are available:\n\n       <searchComponent name=\"query\"     class=\"solr.QueryComponent\" />\n       <searchComponent name=\"facet\"     class=\"solr.FacetComponent\" />\n       <searchComponent name=\"mlt\"       class=\"solr.MoreLikeThisComponent\" />\n       <searchComponent name=\"highlight\" class=\"solr.HighlightComponent\" />\n       <searchComponent name=\"stats\"     class=\"solr.StatsComponent\" />\n       <searchComponent name=\"debug\"     class=\"solr.DebugComponent\" />\n\n       Default configuration in a requestHandler would look like:\n\n       <arr name=\"components\">\n         <str>query</str>\n         <str>facet</str>\n         <str>mlt</str>\n         <str>highlight</str>\n         <str>stats</str>\n         <str>debug</str>\n       </arr>\n\n       If you register a searchComponent to one of the standard names,\n       that will be used instead of the default.\n\n       To insert components before or after the 'standard' components, use:\n\n       <arr name=\"first-components\">\n         <str>myFirstComponentName</str>\n       </arr>\n\n       <arr 
name=\"last-components\">\n         <str>myLastComponentName</str>\n       </arr>\n\n       NOTE: The component registered with the name \"debug\" will\n       always be executed after the \"last-components\"\n\n     -->\n\n  <!-- Spell Check\n\n       The spell check component can return a list of alternative spelling\n       suggestions.\n\n       http://wiki.apache.org/solr/SpellCheckComponent\n    -->\n  <searchComponent name=\"spellcheck\" class=\"solr.SpellCheckComponent\">\n\n    <str name=\"queryAnalyzerFieldType\">text_en</str>\n\n    <!-- Multiple \"Spell Checkers\" can be declared and used by this\n         component\n      -->\n\n    <!-- a spellchecker built from a field of the main index -->\n    <lst name=\"spellchecker\">\n      <str name=\"name\">default</str>\n      <str name=\"field\">text</str>\n      <str name=\"classname\">solr.DirectSolrSpellChecker</str>\n      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->\n      <str name=\"distanceMeasure\">internal</str>\n      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->\n      <float name=\"accuracy\">0.5</float>\n      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->\n      <int name=\"maxEdits\">2</int>\n      <!-- the minimum shared prefix when enumerating terms -->\n      <int name=\"minPrefix\">1</int>\n      <!-- maximum number of inspections per result. 
-->\n      <int name=\"maxInspections\">5</int>\n      <!-- minimum length of a query term to be considered for correction -->\n      <int name=\"minQueryLength\">4</int>\n      <!-- maximum threshold of documents a query term can appear to be considered for correction -->\n      <float name=\"maxQueryFrequency\">0.01</float>\n      <!-- uncomment this to require suggestions to occur in 1% of the documents\n        <float name=\"thresholdTokenFrequency\">.01</float>\n      -->\n    </lst>\n\n    <!-- a spellchecker that can break or combine words.  See \"/spell\" handler below for usage -->\n    <!--\n    <lst name=\"spellchecker\">\n      <str name=\"name\">wordbreak</str>\n      <str name=\"classname\">solr.WordBreakSolrSpellChecker</str>\n      <str name=\"field\">name</str>\n      <str name=\"combineWords\">true</str>\n      <str name=\"breakWords\">true</str>\n      <int name=\"maxChanges\">10</int>\n    </lst>\n    -->\n  </searchComponent>\n\n  <!-- A request handler for demonstrating the spellcheck component.\n\n       NOTE: This is purely as an example.  
The whole purpose of the\n       SpellCheckComponent is to hook it into the request handler that\n       handles your normal user queries so that a separate request is\n       not needed to get suggestions.\n\n       IN OTHER WORDS, THERE IS REALLY GOOD CHANCE THE SETUP BELOW IS\n       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!\n\n       See http://wiki.apache.org/solr/SpellCheckComponent for details\n       on the request parameters.\n    -->\n  <requestHandler name=\"/spell\" class=\"solr.SearchHandler\" startup=\"lazy\">\n    <lst name=\"defaults\">\n      <!-- Solr will use suggestions from both the 'default' spellchecker\n           and from the 'wordbreak' spellchecker and combine them.\n           collations (re-written queries) can include a combination of\n           corrections from both spellcheckers -->\n      <str name=\"spellcheck.dictionary\">default</str>\n      <str name=\"spellcheck\">on</str>\n      <str name=\"spellcheck.extendedResults\">true</str>\n      <str name=\"spellcheck.count\">10</str>\n      <str name=\"spellcheck.alternativeTermCount\">5</str>\n      <str name=\"spellcheck.maxResultsForSuggest\">5</str>\n      <str name=\"spellcheck.collate\">true</str>\n      <str name=\"spellcheck.collateExtendedResults\">true</str>\n      <str name=\"spellcheck.maxCollationTries\">10</str>\n      <str name=\"spellcheck.maxCollations\">5</str>\n    </lst>\n    <arr name=\"last-components\">\n      <str>spellcheck</str>\n    </arr>\n  </requestHandler>\n\n  <!-- Term Vector Component\n\n       http://wiki.apache.org/solr/TermVectorComponent\n    -->\n  <searchComponent name=\"tvComponent\" class=\"solr.TermVectorComponent\"/>\n\n  <!-- A request handler for demonstrating the term vector component\n\n       This is purely as an example.\n\n       In reality you will likely want to add the component to your\n       already specified request handlers.\n    -->\n  <requestHandler name=\"/tvrh\" class=\"solr.SearchHandler\" startup=\"lazy\">\n    
<lst name=\"defaults\">\n      <bool name=\"tv\">true</bool>\n    </lst>\n    <arr name=\"last-components\">\n      <str>tvComponent</str>\n    </arr>\n  </requestHandler>\n\n  <!-- Clustering Component. (Omitted here. See the default Solr example for a typical configuration.) -->\n\n  <!-- Terms Component\n\n       http://wiki.apache.org/solr/TermsComponent\n\n       A component to return terms and document frequency of those\n       terms\n    -->\n  <searchComponent name=\"terms\" class=\"solr.TermsComponent\"/>\n\n  <!-- A request handler for demonstrating the terms component -->\n  <requestHandler name=\"/terms\" class=\"solr.SearchHandler\" startup=\"lazy\">\n    <lst name=\"defaults\">\n      <bool name=\"terms\">true</bool>\n      <bool name=\"distrib\">false</bool>\n    </lst>\n    <arr name=\"components\">\n      <str>terms</str>\n    </arr>\n  </requestHandler>\n\n\n  <!-- Query Elevation Component\n\n       http://wiki.apache.org/solr/QueryElevationComponent\n\n       a search component that enables you to configure the top\n       results for a given query regardless of the normal lucene\n       scoring.\n    -->\n  <searchComponent name=\"elevator\" class=\"solr.QueryElevationComponent\" >\n    <!-- pick a fieldType to analyze queries -->\n    <str name=\"queryFieldType\">string</str>\n    <str name=\"config-file\">elevate.xml</str>\n  </searchComponent>\n\n  <!-- A request handler for demonstrating the elevator component -->\n  <requestHandler name=\"/elevate\" class=\"solr.SearchHandler\" startup=\"lazy\">\n    <lst name=\"defaults\">\n      <str name=\"echoParams\">explicit</str>\n    </lst>\n    <arr name=\"last-components\">\n      <str>elevator</str>\n    </arr>\n  </requestHandler>\n\n  <!-- Highlighting Component\n\n       http://wiki.apache.org/solr/HighlightingParameters\n    -->\n  <searchComponent class=\"solr.HighlightComponent\" name=\"highlight\">\n    <highlighting>\n      <!-- Configure the standard fragmenter -->\n      <!-- This 
could most likely be commented out in the \"default\" case -->\n      <fragmenter name=\"gap\"\n                  default=\"true\"\n                  class=\"solr.highlight.GapFragmenter\">\n        <lst name=\"defaults\">\n          <int name=\"hl.fragsize\">100</int>\n        </lst>\n      </fragmenter>\n\n      <!-- A regular-expression-based fragmenter\n           (for sentence extraction)\n        -->\n      <fragmenter name=\"regex\"\n                  class=\"solr.highlight.RegexFragmenter\">\n        <lst name=\"defaults\">\n          <!-- slightly smaller fragsizes work better because of slop -->\n          <int name=\"hl.fragsize\">70</int>\n          <!-- allow 50% slop on fragment sizes -->\n          <float name=\"hl.regex.slop\">0.5</float>\n          <!-- a basic sentence pattern -->\n          <str name=\"hl.regex.pattern\">[-\\w ,/\\n\\&quot;&apos;]{20,200}</str>\n        </lst>\n      </fragmenter>\n\n      <!-- Configure the standard formatter -->\n      <formatter name=\"html\"\n                 default=\"true\"\n                 class=\"solr.highlight.HtmlFormatter\">\n        <lst name=\"defaults\">\n          <str name=\"hl.simple.pre\"><![CDATA[<em>]]></str>\n          <str name=\"hl.simple.post\"><![CDATA[</em>]]></str>\n        </lst>\n      </formatter>\n\n      <!-- Configure the standard encoder -->\n      <encoder name=\"html\"\n               class=\"solr.highlight.HtmlEncoder\" />\n\n      <!-- Configure the standard fragListBuilder -->\n      <fragListBuilder name=\"simple\"\n                       class=\"solr.highlight.SimpleFragListBuilder\"/>\n\n      <!-- Configure the single fragListBuilder -->\n      <fragListBuilder name=\"single\"\n                       class=\"solr.highlight.SingleFragListBuilder\"/>\n\n      <!-- Configure the weighted fragListBuilder -->\n      <fragListBuilder name=\"weighted\"\n                       default=\"true\"\n                       class=\"solr.highlight.WeightedFragListBuilder\"/>\n\n      
<!-- default tag FragmentsBuilder -->\n      <fragmentsBuilder name=\"default\"\n                        default=\"true\"\n                        class=\"solr.highlight.ScoreOrderFragmentsBuilder\">\n        <!--\n        <lst name=\"defaults\">\n          <str name=\"hl.multiValuedSeparatorChar\">/</str>\n        </lst>\n        -->\n      </fragmentsBuilder>\n\n      <!-- multi-colored tag FragmentsBuilder -->\n      <fragmentsBuilder name=\"colored\"\n                        class=\"solr.highlight.ScoreOrderFragmentsBuilder\">\n        <lst name=\"defaults\">\n          <str name=\"hl.tag.pre\"><![CDATA[\n               <b style=\"background:yellow\">,<b style=\"background:lawgreen\">,\n               <b style=\"background:aquamarine\">,<b style=\"background:magenta\">,\n               <b style=\"background:palegreen\">,<b style=\"background:coral\">,\n               <b style=\"background:wheat\">,<b style=\"background:khaki\">,\n               <b style=\"background:lime\">,<b style=\"background:deepskyblue\">]]></str>\n          <str name=\"hl.tag.post\"><![CDATA[</b>]]></str>\n        </lst>\n      </fragmentsBuilder>\n\n      <boundaryScanner name=\"default\"\n                       default=\"true\"\n                       class=\"solr.highlight.SimpleBoundaryScanner\">\n        <lst name=\"defaults\">\n          <str name=\"hl.bs.maxScan\">10</str>\n          <str name=\"hl.bs.chars\">.,!? &#9;&#10;&#13;</str>\n        </lst>\n      </boundaryScanner>\n\n      <boundaryScanner name=\"breakIterator\"\n                       class=\"solr.highlight.BreakIteratorBoundaryScanner\">\n        <lst name=\"defaults\">\n          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->\n          <str name=\"hl.bs.type\">WORD</str>\n          <!-- language and country are used when constructing Locale object.  
-->\n          <!-- And the Locale object will be used when getting instance of BreakIterator -->\n          <str name=\"hl.bs.language\">en</str>\n          <str name=\"hl.bs.country\">US</str>\n        </lst>\n      </boundaryScanner>\n    </highlighting>\n  </searchComponent>\n\n  <!-- Update Processors\n\n       Chains of Update Processor Factories for dealing with Update\n       Requests can be declared, and then used by name in Update\n       Request Processors\n\n       http://wiki.apache.org/solr/UpdateRequestProcessor\n\n    -->\n\n  <!-- Add unknown fields to the schema\n\n       An example field type guessing update processor that will\n       attempt to parse string-typed field values as Booleans, Longs,\n       Doubles, or Dates, and then add schema fields with the guessed\n       field types.\n\n       This requires that the schema is both managed and mutable, by\n       declaring schemaFactory as ManagedIndexSchemaFactory, with\n       mutable specified as true.\n\n       See http://wiki.apache.org/solr/GuessingFieldTypes\n    -->\n  <updateRequestProcessorChain name=\"add-unknown-fields-to-the-schema\">\n    <!-- UUIDUpdateProcessorFactory will generate an id if none is present in the incoming document -->\n    <processor class=\"solr.UUIDUpdateProcessorFactory\" />\n    <processor class=\"solr.RemoveBlankFieldUpdateProcessorFactory\"/>\n    <processor class=\"solr.FieldNameMutatingUpdateProcessorFactory\">\n      <str name=\"pattern\">[^\\w-\\.]</str>\n      <str name=\"replacement\">_</str>\n    </processor>\n    <processor class=\"solr.ParseBooleanFieldUpdateProcessorFactory\"/>\n    <processor class=\"solr.ParseLongFieldUpdateProcessorFactory\"/>\n    <processor class=\"solr.ParseDoubleFieldUpdateProcessorFactory\"/>\n    <processor class=\"solr.ParseDateFieldUpdateProcessorFactory\">\n      <arr name=\"format\">\n        <str>yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>\n        <str>yyyy-MM-dd'T'HH:mm:ss,SSSZ</str>\n        
<str>yyyy-MM-dd'T'HH:mm:ss.SSS</str>\n        <str>yyyy-MM-dd'T'HH:mm:ss,SSS</str>\n        <str>yyyy-MM-dd'T'HH:mm:ssZ</str>\n        <str>yyyy-MM-dd'T'HH:mm:ss</str>\n        <str>yyyy-MM-dd'T'HH:mmZ</str>\n        <str>yyyy-MM-dd'T'HH:mm</str>\n        <str>yyyy-MM-dd HH:mm:ss.SSSZ</str>\n        <str>yyyy-MM-dd HH:mm:ss,SSSZ</str>\n        <str>yyyy-MM-dd HH:mm:ss.SSS</str>\n        <str>yyyy-MM-dd HH:mm:ss,SSS</str>\n        <str>yyyy-MM-dd HH:mm:ssZ</str>\n        <str>yyyy-MM-dd HH:mm:ss</str>\n        <str>yyyy-MM-dd HH:mmZ</str>\n        <str>yyyy-MM-dd HH:mm</str>\n        <str>yyyy-MM-dd</str>\n      </arr>\n    </processor>\n    <!--<processor class=\"solr.AddSchemaFieldsUpdateProcessorFactory\">\n      <str name=\"defaultFieldType\">strings</str>\n      <lst name=\"typeMapping\">\n        <str name=\"valueClass\">java.lang.Boolean</str>\n        <str name=\"fieldType\">booleans</str>\n      </lst>\n      <lst name=\"typeMapping\">\n        <str name=\"valueClass\">java.util.Date</str>\n        <str name=\"fieldType\">tdates</str>\n      </lst>\n      <lst name=\"typeMapping\">\n        <str name=\"valueClass\">java.lang.Long</str>\n        <str name=\"valueClass\">java.lang.Integer</str>\n        <str name=\"fieldType\">tlongs</str>\n      </lst>\n      <lst name=\"typeMapping\">\n        <str name=\"valueClass\">java.lang.Number</str>\n        <str name=\"fieldType\">tdoubles</str>\n      </lst>\n    </processor>-->\n    <processor class=\"solr.LogUpdateProcessorFactory\"/>\n    <processor class=\"solr.DistributedUpdateProcessorFactory\"/>\n    <processor class=\"solr.RunUpdateProcessorFactory\"/>\n  </updateRequestProcessorChain>\n\n  <!-- Deduplication\n\n       An example dedup update processor that creates the \"id\" field\n       on the fly based on the hash code of some other fields.  
This\n       example has overwriteDupes set to false since we are using the\n       id field as the signatureField and Solr will maintain\n       uniqueness based on that anyway.\n\n    -->\n  <!--\n     <updateRequestProcessorChain name=\"dedupe\">\n       <processor class=\"solr.processor.SignatureUpdateProcessorFactory\">\n         <bool name=\"enabled\">true</bool>\n         <str name=\"signatureField\">id</str>\n         <bool name=\"overwriteDupes\">false</bool>\n         <str name=\"fields\">name,features,cat</str>\n         <str name=\"signatureClass\">solr.processor.Lookup3Signature</str>\n       </processor>\n       <processor class=\"solr.LogUpdateProcessorFactory\" />\n       <processor class=\"solr.RunUpdateProcessorFactory\" />\n     </updateRequestProcessorChain>\n    -->\n\n  <!-- Language identification\n\n       This example update chain identifies the language of the incoming\n       documents using the langid contrib. The detected language is\n       written to field language_s. 
No field name mapping is done.\n       The fields used for detection are text, title, subject and description,\n       making this example suitable for detecting languages from full-text\n       rich documents injected via ExtractingRequestHandler.\n       See more about langId at http://wiki.apache.org/solr/LanguageDetection\n    -->\n  <!--\n   <updateRequestProcessorChain name=\"langid\">\n     <processor class=\"org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory\">\n       <str name=\"langid.fl\">text,title,subject,description</str>\n       <str name=\"langid.langField\">language_s</str>\n       <str name=\"langid.fallback\">en</str>\n     </processor>\n     <processor class=\"solr.LogUpdateProcessorFactory\" />\n     <processor class=\"solr.RunUpdateProcessorFactory\" />\n   </updateRequestProcessorChain>\n  -->\n\n  <!-- Script update processor\n\n    This example hooks in an update processor implemented using JavaScript.\n\n    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor\n  -->\n  <!--\n    <updateRequestProcessorChain name=\"script\">\n      <processor class=\"solr.StatelessScriptUpdateProcessorFactory\">\n        <str name=\"script\">update-script.js</str>\n        <lst name=\"params\">\n          <str name=\"config_param\">example config parameter</str>\n        </lst>\n      </processor>\n      <processor class=\"solr.RunUpdateProcessorFactory\" />\n    </updateRequestProcessorChain>\n  -->\n\n  <!-- Response Writers\n\n       http://wiki.apache.org/solr/QueryResponseWriter\n\n       Request responses will be written using the writer specified by\n       the 'wt' request parameter matching the name of a registered\n       writer.\n\n       The \"default\" writer is the default and will be used if 'wt' is\n       not specified in the request.\n    -->\n  <!-- The following response writers are implicitly configured unless\n       overridden...\n    -->\n  <!--\n     
<queryResponseWriter name=\"xml\"\n                          default=\"true\"\n                          class=\"solr.XMLResponseWriter\" />\n     <queryResponseWriter name=\"json\" class=\"solr.JSONResponseWriter\"/>\n     <queryResponseWriter name=\"python\" class=\"solr.PythonResponseWriter\"/>\n     <queryResponseWriter name=\"ruby\" class=\"solr.RubyResponseWriter\"/>\n     <queryResponseWriter name=\"php\" class=\"solr.PHPResponseWriter\"/>\n     <queryResponseWriter name=\"phps\" class=\"solr.PHPSerializedResponseWriter\"/>\n     <queryResponseWriter name=\"csv\" class=\"solr.CSVResponseWriter\"/>\n     <queryResponseWriter name=\"schema.xml\" class=\"solr.SchemaXmlResponseWriter\"/>\n    -->\n\n  <queryResponseWriter name=\"json\" class=\"solr.JSONResponseWriter\">\n    <!-- For the purposes of the tutorial, JSON responses are written as\n     plain text so that they are easy to read in *any* browser.\n     If you expect a MIME type of \"application/json\" just remove this override.\n    -->\n    <str name=\"content-type\">text/plain; charset=UTF-8</str>\n  </queryResponseWriter>\n\n  <!--\n     Custom response writers can be declared as needed...\n    -->\n  <queryResponseWriter name=\"velocity\" class=\"solr.VelocityResponseWriter\" startup=\"lazy\">\n    <str name=\"template.base.dir\">${velocity.template.base.dir:}</str>\n    <str name=\"solr.resource.loader.enabled\">${velocity.solr.resource.loader.enabled:true}</str>\n    <str name=\"params.resource.loader.enabled\">${velocity.params.resource.loader.enabled:false}</str>\n  </queryResponseWriter>\n\n  <!-- XSLT response writer transforms the XML output by any xslt file found\n       in Solr's conf/xslt directory.  
Changes to xslt files are checked for\n       every xsltCacheLifetimeSeconds.\n    -->\n  <queryResponseWriter name=\"xslt\" class=\"solr.XSLTResponseWriter\">\n    <int name=\"xsltCacheLifetimeSeconds\">5</int>\n  </queryResponseWriter>\n\n  <!-- Query Parsers\n\n       https://cwiki.apache.org/confluence/display/solr/Query+Syntax+and+Parsing\n\n       Multiple QParserPlugins can be registered by name, and then\n       used in either the \"defType\" param for the QueryComponent (used\n       by SearchHandler) or in LocalParams\n    -->\n  <!-- example of registering a query parser -->\n  <!--\n     <queryParser name=\"myparser\" class=\"com.mycompany.MyQParserPlugin\"/>\n    -->\n\n  <!-- Function Parsers\n\n       http://wiki.apache.org/solr/FunctionQuery\n\n       Multiple ValueSourceParsers can be registered by name, and then\n       used as function names when using the \"func\" QParser.\n    -->\n  <!-- example of registering a custom function parser  -->\n  <!--\n     <valueSourceParser name=\"myfunc\"\n                        class=\"com.mycompany.MyValueSourceParser\" />\n    -->\n\n\n  <!-- Document Transformers\n       http://wiki.apache.org/solr/DocTransformers\n    -->\n  <!--\n     Could be something like:\n     <transformer name=\"db\" class=\"com.mycompany.LoadFromDatabaseTransformer\" >\n       <int name=\"connection\">jdbc://....</int>\n     </transformer>\n\n     To add a constant value to all docs, use:\n     <transformer name=\"mytrans2\" class=\"org.apache.solr.response.transform.ValueAugmenterFactory\" >\n       <int name=\"value\">5</int>\n     </transformer>\n\n     If you want the user to still be able to change it with _value:something_ use this:\n     <transformer name=\"mytrans3\" class=\"org.apache.solr.response.transform.ValueAugmenterFactory\" >\n       <double name=\"defaultValue\">5</double>\n     </transformer>\n\n      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  
The\n      EditorialMarkerFactory will do exactly that:\n     <transformer name=\"qecBooster\" class=\"org.apache.solr.response.transform.EditorialMarkerFactory\" />\n    -->\n\n    <!-- Extras noted by users of django-haystack -->\n    <requestHandler name=\"/analysis/field\"\n                  startup=\"lazy\"\n                  class=\"solr.FieldAnalysisRequestHandler\" />\n    <requestHandler name=\"/analysis/document\"\n                  class=\"solr.DocumentAnalysisRequestHandler\"\n                  startup=\"lazy\" />\n</config>\n"
  },
  {
    "path": "test_haystack/solr_tests/server/get-solr-download-url.py",
    "content": "#!/usr/bin/env python\nimport sys\nfrom itertools import chain\nfrom urllib.parse import urljoin\n\nimport requests\n\nif len(sys.argv) != 2:\n    print(\"Usage: %s SOLR_VERSION\" % sys.argv[0], file=sys.stderr)\n    sys.exit(1)\n\nsolr_version = sys.argv[1]\ntarball = \"solr-{0}.tgz\".format(solr_version)\ndist_path = \"lucene/solr/{0}/{1}\".format(solr_version, tarball)\n\ndownload_url = urljoin(\"https://archive.apache.org/dist/\", dist_path)\nmirror_response = requests.get(\n    \"https://www.apache.org/dyn/mirrors/mirrors.cgi/%s?asjson=1\" % dist_path\n)\n\nif not mirror_response.ok:\n    print(\n        \"Apache mirror request returned HTTP %d\" % mirror_response.status_code,\n        file=sys.stderr,\n    )\n    sys.exit(1)\n\nmirror_data = mirror_response.json()\n\n# Since the Apache mirrors are often unreliable and releases may disappear without notice we'll\n# try the preferred mirror, all of the alternates and backups, and fall back to the main Apache\n# archive server:\nfor base_url in chain(\n    (mirror_data[\"preferred\"],),\n    mirror_data[\"http\"],\n    mirror_data[\"backup\"],\n    (\"https://archive.apache.org/dist/\",),\n):\n    test_url = urljoin(base_url, mirror_data[\"path_info\"])\n\n    # The Apache mirror script's response format has recently changed to exclude the actual file paths:\n    if not test_url.endswith(tarball):\n        test_url = urljoin(test_url, dist_path)\n\n    try:\n        if requests.head(test_url, allow_redirects=True).status_code == 200:\n            download_url = test_url\n            break\n    except requests.exceptions.ConnectionError:\n        continue\nelse:\n    print(\"None of the Apache mirrors have %s\" % dist_path, file=sys.stderr)\n    sys.exit(1)\n\nprint(download_url)\n"
  },
  {
    "path": "test_haystack/solr_tests/server/start-solr-test-server.sh",
    "content": "#!/bin/bash\n\nset -e\n\nSOLR_VERSION=6.6.4\nSOLR_DIR=solr\n\n\nSOLR_PORT=9001\n\ncd $(dirname $0)\n\nexport TEST_ROOT=$(pwd)\n\nexport SOLR_ARCHIVE=\"${SOLR_VERSION}.tgz\"\n\nif [ -d \"${HOME}/download-cache/\" ]; then\n    export SOLR_ARCHIVE=\"${HOME}/download-cache/${SOLR_ARCHIVE}\"\nfi\n\nif [ -f ${SOLR_ARCHIVE} ]; then\n    # If the tarball doesn't extract cleanly, remove it so it'll download again:\n    tar -tf ${SOLR_ARCHIVE} > /dev/null || rm ${SOLR_ARCHIVE}\nfi\n\nif [ ! -f ${SOLR_ARCHIVE} ]; then\n    SOLR_DOWNLOAD_URL=$(python get-solr-download-url.py $SOLR_VERSION)\n    curl -Lo $SOLR_ARCHIVE ${SOLR_DOWNLOAD_URL} || (echo \"Unable to download ${SOLR_DOWNLOAD_URL}\"; exit 2)\nfi\n\necho \"Extracting Solr ${SOLR_ARCHIVE} to ${TEST_ROOT}/${SOLR_DIR}\"\nrm -rf ${SOLR_DIR}\nmkdir ${SOLR_DIR}\nFULL_SOLR_DIR=$(readlink -f ./${SOLR_DIR})\ntar -C ${SOLR_DIR} -xf ${SOLR_ARCHIVE} --strip-components=1\n\n# These tuning options will break on Java 10 and for testing we don't care about\n# production server optimizations:\nexport GC_LOG_OPTS=\"\"\nexport GC_TUNE=\"\"\n\nexport SOLR_LOGS_DIR=\"${FULL_SOLR_DIR}/logs\"\n\ninstall -d ${SOLR_LOGS_DIR}\n\necho \"Changing into ${FULL_SOLR_DIR} \"\n\ncd ${FULL_SOLR_DIR}\n\necho \"Creating Solr Core\"\n./bin/solr start -p ${SOLR_PORT}\n./bin/solr create -c collection1 -p ${SOLR_PORT} -n basic_config\n./bin/solr create -c mgmnt -p ${SOLR_PORT}\n\necho \"Solr system information:\"\ncurl --fail --silent 'http://localhost:9001/solr/admin/info/system?wt=json&indent=on' | python -m json.tool\n./bin/solr stop -p ${SOLR_PORT}\n\nCONF_DIR=${TEST_ROOT}/confdir\nCORE_DIR=${FULL_SOLR_DIR}/server/solr/collection1\nmv ${CORE_DIR}/conf/managed-schema ${CORE_DIR}/conf/managed-schema.old\ncp ${CONF_DIR}/* ${CORE_DIR}/conf/\n\necho 'Starting server'\ncd server\n# We use exec to allow process monitors to correctly kill the\n# actual Java process rather than this launcher script:\nexport CMD=\"java -Djetty.port=${SOLR_PORT} 
-Djava.awt.headless=true -Dapple.awt.UIElement=true -jar start.jar --module=http -Dsolr.install.dir=${FULL_SOLR_DIR} -Dsolr.log.dir=${SOLR_LOGS_DIR}\"\n\nif [ -z \"${BACKGROUND_SOLR}\" ]; then\n    exec $CMD\nelse\n    exec $CMD >/dev/null &\nfi\n"
  },
  {
    "path": "test_haystack/solr_tests/server/wait-for-solr",
    "content": "#!/usr/bin/env python\n\"\"\"Simple throttle to wait for Solr to start on busy test servers\"\"\"\nimport sys\nimport time\n\nimport requests\n\nmax_retries = 100\nretry_count = 0\nretry_delay = 15\nstatus_url = 'http://localhost:9001/solr/collection1/admin/ping'\n\n\nwhile retry_count < max_retries:\n    status_code = 0\n\n    try:\n        r = requests.get(status_url)\n        status_code = r.status_code\n        if status_code == 200:\n            sys.exit(0)\n    except Exception as exc:\n        print('Unhandled exception requesting %s: %s' % (status_url, exc), file=sys.stderr)\n\n    retry_count += 1\n\n    print('Waiting {0} seconds for Solr to start (retry #{1}, status {2})'.format(retry_delay,\n                                                                                  retry_count,\n                                                                                  status_code),\n          file=sys.stderr)\n    time.sleep(retry_delay)\n\n\nprint(\"Solr took too long to start (#%d retries)\" % retry_count, file=sys.stderr)\nsys.exit(1)\n"
  },
  {
    "path": "test_haystack/solr_tests/test_admin.py",
    "content": "from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\nfrom django.urls import reverse\n\nfrom haystack import connections, reset_search_queries\nfrom haystack.utils.loading import UnifiedIndex\n\nfrom ..core.models import MockModel\nfrom .test_solr_backend import SolrMockModelSearchIndex, clear_solr_index\n\n\n@override_settings(DEBUG=True)\nclass SearchModelAdminTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # With the models setup, you get the proper bits.\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        smmsi = SolrMockModelSearchIndex()\n        self.ui.build(indexes=[smmsi])\n        connections[\"solr\"]._index = self.ui\n\n        # Wipe it clean.\n        clear_solr_index()\n\n        # Force indexing of the content.\n        smmsi.update(using=\"solr\")\n\n        superuser = User.objects.create_superuser(\n            username=\"superuser\", password=\"password\", email=\"super@user.com\"\n        )\n\n    def tearDown(self):\n        # Restore.\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_usage(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n\n        self.assertEqual(\n            self.client.login(username=\"superuser\", password=\"password\"), True\n        )\n\n        # First, non-search behavior.\n        resp = self.client.get(\"/admin/core/mockmodel/\")\n        self.assertEqual(resp.status_code, 200)\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        self.assertEqual(resp.context[\"cl\"].full_result_count, 23)\n\n        # Then search behavior.\n        resp = self.client.get(\"/admin/core/mockmodel/\", data={\"q\": \"Haystack\"})\n 
       self.assertEqual(resp.status_code, 200)\n        self.assertEqual(len(connections[\"solr\"].queries), 3)\n        self.assertEqual(resp.context[\"cl\"].full_result_count, 23)\n        # Ensure they aren't search results.\n        self.assertEqual(isinstance(resp.context[\"cl\"].result_list[0], MockModel), True)\n\n        result_pks = [i.pk for i in resp.context[\"cl\"].result_list]\n        self.assertIn(5, result_pks)\n\n        # Make sure only changelist is affected.\n        resp = self.client.get(reverse(\"admin:core_mockmodel_change\", args=(1,)))\n        self.assertEqual(resp.status_code, 200)\n        self.assertEqual(resp.context[\"original\"].id, 1)\n        self.assertTemplateUsed(resp, \"admin/change_form.html\")\n\n        # The Solr query count should be unchanged:\n        self.assertEqual(len(connections[\"solr\"].queries), 3)\n"
  },
  {
    "path": "test_haystack/solr_tests/test_inputs.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connections, inputs\n\n\nclass SolrInputTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.query_obj = connections[\"solr\"].get_query()\n\n    def test_raw_init(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {})\n        self.assertEqual(raw.post_process, False)\n\n        raw = inputs.Raw(\"hello OR there, :you\", test=\"really\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {\"test\": \"really\"})\n        self.assertEqual(raw.post_process, False)\n\n    def test_raw_prepare(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.prepare(self.query_obj), \"hello OR there, :you\")\n\n    def test_clean_init(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.query_string, \"hello OR there, :you\")\n        self.assertEqual(clean.post_process, True)\n\n    def test_clean_prepare(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.prepare(self.query_obj), \"hello or there, \\\\:you\")\n\n    def test_exact_init(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.query_string, \"hello OR there, :you\")\n        self.assertEqual(exact.post_process, True)\n\n    def test_exact_prepare(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello OR there, :you\"')\n\n        exact = inputs.Exact(\"hello OR there, :you\", clean=True)\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello or there, \\\\:you\"')\n\n    def test_not_init(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        self.assertEqual(not_it.query_string, \"hello OR 
there, :you\")\n        self.assertEqual(not_it.post_process, True)\n\n    def test_not_prepare(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        self.assertEqual(not_it.prepare(self.query_obj), \"NOT (hello or there, \\\\:you)\")\n\n    def test_autoquery_init(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.query_string, 'panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.post_process, False)\n\n    def test_autoquery_prepare(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(\n            autoquery.prepare(self.query_obj), 'panic NOT don\\'t \"froody dude\"'\n        )\n\n    def test_altparser_init(self):\n        altparser = inputs.AltParser(\"dismax\")\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"\")\n        self.assertEqual(altparser.kwargs, {})\n        self.assertEqual(altparser.post_process, False)\n\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"douglas adams\")\n        self.assertEqual(altparser.kwargs, {\"mm\": 1, \"qf\": \"author\"})\n        self.assertEqual(altparser.post_process, False)\n\n    def test_altparser_prepare(self):\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(\n            altparser.prepare(self.query_obj),\n            \"\"\"_query_:\"{!dismax mm=1 qf=author}douglas adams\\\"\"\"\",\n        )\n\n        altparser = inputs.AltParser(\"dismax\", \"Don't panic\", qf=\"text author\", mm=1)\n        self.assertEqual(\n            altparser.prepare(self.query_obj),\n            \"\"\"_query_:\"{!dismax mm=1 qf='text author'}Don't panic\\\"\"\"\",\n        )\n"
  },
  {
    "path": "test_haystack/solr_tests/test_solr_backend.py",
    "content": "import datetime\nimport logging as std_logging\nimport os\nimport pickle\nimport unittest\nfrom decimal import Decimal\nfrom unittest.mock import patch\n\nimport pysolr\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\nfrom pkg_resources import parse_version\n\nfrom haystack import connections, indexes, reset_search_queries\nfrom haystack.exceptions import SkipDocument\nfrom haystack.inputs import AltParser, AutoQuery, Raw\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, RelatedSearchQuerySet, SearchQuerySet\nfrom haystack.utils.loading import UnifiedIndex\n\nfrom ..core.models import AFourthMockModel, AnotherMockModel, ASixthMockModel, MockModel\nfrom ..mocks import MockSearchResult\n\n\ndef clear_solr_index():\n    # Wipe it clean.\n    print(\"Clearing out Solr...\")\n    raw_solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"])\n    raw_solr.delete(q=\"*:*\")\n\n\nclass SolrMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass SolrMockSearchIndexWithSkipDocument(SolrMockSearchIndex):\n    def prepare_text(self, obj):\n        if obj.author == \"daniel3\":\n            raise SkipDocument\n        return \"Indexed!\\n%s\" % obj.id\n\n\nclass SolrMockOverriddenFieldNameSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(\n        model_attr=\"author\", faceted=True, index_fieldname=\"name_s\"\n    )\n    pub_date = indexes.DateField(model_attr=\"pub_date\", index_fieldname=\"pub_date_dt\")\n    today = indexes.IntegerField(index_fieldname=\"today_i\")\n\n    def prepare_today(self, obj):\n    
    return datetime.datetime.now().day\n\n    def get_model(self):\n        return MockModel\n\n\nclass SolrMaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    month = indexes.CharField(indexed=False)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def prepare_month(self, obj):\n        return \"%02d\" % obj.pub_date.month\n\n    def get_model(self):\n        return MockModel\n\n\nclass SolrMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass SolrAnotherMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return AnotherMockModel\n\n    def prepare_text(self, obj):\n        return \"You might be searching for the user %s\" % obj.author\n\n\nclass SolrBoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(\n        document=True,\n        use_template=True,\n        template_name=\"search/indexes/core/mockmodel_template.txt\",\n    )\n    author = indexes.CharField(model_attr=\"author\", weight=2.0)\n    editor = indexes.CharField(model_attr=\"editor\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return AFourthMockModel\n\n\nclass SolrRoundTripSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField()\n    is_active = indexes.BooleanField()\n    post_count = indexes.IntegerField()\n    average_rating = indexes.FloatField()\n    price 
= indexes.DecimalField()\n    pub_date = indexes.DateField()\n    created = indexes.DateTimeField()\n    tags = indexes.MultiValueField()\n    sites = indexes.MultiValueField()\n\n    def get_model(self):\n        return MockModel\n\n    def prepare(self, obj):\n        prepped = super().prepare(obj)\n        prepped.update(\n            {\n                \"text\": \"This is some example text.\",\n                \"name\": \"Mister Pants\",\n                \"is_active\": True,\n                \"post_count\": 25,\n                \"average_rating\": 3.6,\n                \"price\": Decimal(\"24.99\"),\n                \"pub_date\": datetime.date(2009, 11, 21),\n                \"created\": datetime.datetime(2009, 11, 21, 21, 31, 00),\n                \"tags\": [\"staff\", \"outdoor\", \"activist\", \"scientist\"],\n                \"sites\": [3, 5, 1],\n            }\n        )\n        return prepped\n\n\nclass SolrComplexFacetsMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField(faceted=True)\n    is_active = indexes.BooleanField(faceted=True)\n    post_count = indexes.IntegerField()\n    post_count_i = indexes.FacetIntegerField(facet_for=\"post_count\")\n    average_rating = indexes.FloatField(faceted=True)\n    pub_date = indexes.DateField(faceted=True)\n    created = indexes.DateTimeField(faceted=True)\n    sites = indexes.MultiValueField(faceted=True)\n\n    def get_model(self):\n        return MockModel\n\n\nclass SolrAutocompleteMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    text_auto = indexes.EdgeNgramField(model_attr=\"foo\")\n    name_auto = indexes.EdgeNgramField(model_attr=\"author\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass 
SolrSpatialSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"name\", document=True)\n    location = indexes.LocationField()\n\n    def prepare_location(self, obj):\n        return \"%s,%s\" % (obj.lat, obj.lon)\n\n    def get_model(self):\n        return ASixthMockModel\n\n\nclass SolrQuotingMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n\n    def get_model(self):\n        return MockModel\n\n    def prepare_text(self, obj):\n        return \"\"\"Don't panic but %s has been iñtërnâtiônàlizéð\"\"\" % obj.author\n\n\nclass SolrSearchBackendTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        self.raw_solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"])\n        clear_solr_index()\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SolrMockSearchIndex()\n        self.smmidni = SolrMockSearchIndexWithSkipDocument()\n        self.smtmmi = SolrMaintainTypeMockSearchIndex()\n        self.smofnmi = SolrMockOverriddenFieldNameSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"solr\"]._index = self.ui\n        self.sb = connections[\"solr\"].get_backend()\n        self.sq = connections[\"solr\"].get_query()\n\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_non_silent(self):\n        bad_sb = connections[\"solr\"].backend(\n            \"bad\", URL=\"http://omg.wtf.bbq:1000/solr\", SILENTLY_FAIL=False, TIMEOUT=1\n   
     )\n\n        try:\n            bad_sb.update(self.smmi, self.sample_objs)\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.remove(\"core.mockmodel.1\")\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.clear()\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.search(\"foo\")\n            self.fail()\n        except:\n            pass\n\n    def test_update(self):\n        self.sb.update(self.smmi, self.sample_objs)\n\n        results = self.raw_solr.search(\"*:*\")\n        for result in results:\n            del result[\"_version_\"]\n        # Check what Solr thinks is there.\n        self.assertEqual(results.hits, 3)\n        self.assertEqual(\n            results.docs,\n            [\n                {\n                    \"django_id\": \"1\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel1\",\n                    \"name_exact\": \"daniel1\",\n                    \"text\": \"Indexed!\\n1\",\n                    \"pub_date\": \"2009-02-24T00:00:00Z\",\n                    \"id\": \"core.mockmodel.1\",\n                },\n                {\n                    \"django_id\": \"2\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel2\",\n                    \"name_exact\": \"daniel2\",\n                    \"text\": \"Indexed!\\n2\",\n                    \"pub_date\": \"2009-02-23T00:00:00Z\",\n                    \"id\": \"core.mockmodel.2\",\n                },\n                {\n                    \"django_id\": \"3\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel3\",\n                    \"name_exact\": \"daniel3\",\n                    \"text\": \"Indexed!\\n3\",\n                    \"pub_date\": \"2009-02-22T00:00:00Z\",\n                    \"id\": 
\"core.mockmodel.3\",\n                },\n            ],\n        )\n\n    def test_update_with_SkipDocument_raised(self):\n        self.sb.update(self.smmidni, self.sample_objs)\n\n        res = self.raw_solr.search(\"*:*\")\n\n        # Check what Solr thinks is there.\n        self.assertEqual(res.hits, 2)\n        self.assertListEqual(\n            sorted([x[\"id\"] for x in res.docs]),\n            [\"core.mockmodel.1\", \"core.mockmodel.2\"],\n        )\n\n    def test_remove(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 3)\n\n        self.sb.remove(self.sample_objs[0])\n        results = self.raw_solr.search(\"*:*\")\n        for result in results:\n            del result[\"_version_\"]\n        self.assertEqual(results.hits, 2)\n        self.assertEqual(\n            results.docs,\n            [\n                {\n                    \"django_id\": \"2\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel2\",\n                    \"name_exact\": \"daniel2\",\n                    \"text\": \"Indexed!\\n2\",\n                    \"pub_date\": \"2009-02-23T00:00:00Z\",\n                    \"id\": \"core.mockmodel.2\",\n                },\n                {\n                    \"django_id\": \"3\",\n                    \"django_ct\": \"core.mockmodel\",\n                    \"name\": \"daniel3\",\n                    \"name_exact\": \"daniel3\",\n                    \"text\": \"Indexed!\\n3\",\n                    \"pub_date\": \"2009-02-22T00:00:00Z\",\n                    \"id\": \"core.mockmodel.3\",\n                },\n            ],\n        )\n\n    def test_clear(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 3)\n\n        self.sb.clear()\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 0)\n\n        self.sb.update(self.smmi, 
self.sample_objs)\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 3)\n\n        self.sb.clear([AnotherMockModel])\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 3)\n\n        self.sb.clear([MockModel])\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 0)\n\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 3)\n\n        self.sb.clear([AnotherMockModel, MockModel])\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 0)\n\n    def test_alternate_index_fieldname(self):\n        self.ui.build(indexes=[self.smofnmi])\n        connections[\"solr\"]._index = self.ui\n        self.sb.update(self.smofnmi, self.sample_objs)\n        search = self.sb.search(\"*\")\n        self.assertEqual(search[\"hits\"], 3)\n        results = search[\"results\"]\n        today = datetime.datetime.now().day\n        self.assertEqual([result.today for result in results], [today, today, today])\n        self.assertEqual(\n            [result.name for result in results], [\"daniel1\", \"daniel2\", \"daniel3\"]\n        )\n        self.assertEqual(\n            [result.pub_date for result in results],\n            [\n                datetime.date(2009, 2, 25) - datetime.timedelta(days=1),\n                datetime.date(2009, 2, 25) - datetime.timedelta(days=2),\n                datetime.date(2009, 2, 25) - datetime.timedelta(days=3),\n            ],\n        )\n        # revert it back\n        self.ui.build(indexes=[self.smmi])\n        connections[\"solr\"]._index = self.ui\n\n    def test_search(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 3)\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            [result.pk for result in self.sb.search(\"*:*\")[\"results\"]], [\"1\", \"2\", 
\"3\"]\n        )\n\n        self.assertEqual(self.sb.search(\"\", highlight=True), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"Index\", highlight=True)[\"hits\"], 3)\n        self.assertEqual(\n            [\n                result.highlighted[\"text\"][0]\n                for result in self.sb.search(\"Index\", highlight=True)[\"results\"]\n            ],\n            [\"<em>Indexed</em>!\\n1\", \"<em>Indexed</em>!\\n2\", \"<em>Indexed</em>!\\n3\"],\n        )\n\n        # shortened highlighting options\n        highlight_dict = {\"simple.pre\": \"<i>\", \"simple.post\": \"</i>\"}\n        self.assertEqual(\n            self.sb.search(\"\", highlight=highlight_dict), {\"hits\": 0, \"results\": []}\n        )\n        self.assertEqual(self.sb.search(\"Index\", highlight=highlight_dict)[\"hits\"], 3)\n        self.assertEqual(\n            [\n                result.highlighted[\"text\"][0]\n                for result in self.sb.search(\"Index\", highlight=highlight_dict)[\n                    \"results\"\n                ]\n            ],\n            [\"<i>Indexed</i>!\\n1\", \"<i>Indexed</i>!\\n2\", \"<i>Indexed</i>!\\n3\"],\n        )\n\n        # full-form highlighting options\n        highlight_dict = {\"hl.simple.pre\": \"<i>\", \"hl.simple.post\": \"</i>\"}\n        self.assertEqual(\n            [\n                result.highlighted[\"text\"][0]\n                for result in self.sb.search(\"Index\", highlight=highlight_dict)[\n                    \"results\"\n                ]\n            ],\n            [\"<i>Indexed</i>!\\n1\", \"<i>Indexed</i>!\\n2\", \"<i>Indexed</i>!\\n3\"],\n        )\n\n        self.assertEqual(self.sb.search(\"Indx\")[\"hits\"], 0)\n        self.assertEqual(self.sb.search(\"indax\")[\"spelling_suggestion\"], \"index\")\n        self.assertEqual(\n            self.sb.search(\"Indx\", spelling_query=\"indexy\")[\"spelling_suggestion\"],\n            \"index\",\n        )\n\n        self.assertEqual(\n 
           self.sb.search(\"\", facets={\"name\": {}}), {\"hits\": 0, \"results\": []}\n        )\n        results = self.sb.search(\"Index\", facets={\"name\": {}})\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(\n            results[\"facets\"][\"fields\"][\"name\"],\n            [(\"daniel1\", 1), (\"daniel2\", 1), (\"daniel3\", 1)],\n        )\n\n        self.assertEqual(\n            self.sb.search(\n                \"\",\n                date_facets={\n                    \"pub_date\": {\n                        \"start_date\": datetime.date(2008, 2, 26),\n                        \"end_date\": datetime.date(2008, 3, 26),\n                        \"gap_by\": \"month\",\n                        \"gap_amount\": 1,\n                    }\n                },\n            ),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\n            \"Index\",\n            date_facets={\n                \"pub_date\": {\n                    \"start_date\": datetime.date(2008, 2, 26),\n                    \"end_date\": datetime.date(2008, 3, 26),\n                    \"gap_by\": \"month\",\n                    \"gap_amount\": 1,\n                }\n            },\n        )\n        self.assertEqual(results[\"hits\"], 3)\n        # DRL_TODO: Correct output but no counts. 
Another case of needing better test data?\n        # self.assertEqual(results['facets']['dates']['pub_date'], {'end': '2008-02-26T00:00:00Z', 'gap': '/MONTH'})\n\n        self.assertEqual(\n            self.sb.search(\"\", query_facets=[(\"name\", \"[* TO e]\")]),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\"Index\", query_facets=[(\"name\", \"[* TO e]\")])\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(results[\"facets\"][\"queries\"], {\"name:[* TO e]\": 3})\n\n        self.assertEqual(self.sb.search(\"\", stats={}), {\"hits\": 0, \"results\": []})\n        results = self.sb.search(\"*:*\", stats={\"name\": [\"name\"]})\n        self.assertEqual(results[\"hits\"], 3)\n        self.assertEqual(results[\"stats\"][\"name\"][\"count\"], 3)\n\n        self.assertEqual(\n            self.sb.search(\"\", narrow_queries=set([\"name:daniel1\"])),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\"Index\", narrow_queries=set([\"name:daniel1\"]))\n        self.assertEqual(results[\"hits\"], 1)\n\n        # Ensure that swapping the ``result_class`` works.\n        results = self.sb.search(\"index\", result_class=MockSearchResult)\n        self.assertIsInstance(\n            self.sb.search(\"index\", result_class=MockSearchResult)[\"results\"][0],\n            MockSearchResult,\n        )\n\n        # Check the use of ``limit_to_registered_models``.\n        self.assertEqual(\n            self.sb.search(\"\", limit_to_registered_models=False),\n            {\"hits\": 0, \"results\": []},\n        )\n        self.assertEqual(\n            self.sb.search(\"*:*\", limit_to_registered_models=False)[\"hits\"], 3\n        )\n        self.assertEqual(\n            [\n                result.pk\n                for result in self.sb.search(\"*:*\", limit_to_registered_models=False)[\n                    \"results\"\n                ]\n            ],\n            [\"1\", 
\"2\", \"3\"],\n        )\n\n        # Stow.\n        old_limit_to_registered_models = getattr(\n            settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n        )\n        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            [result.pk for result in self.sb.search(\"*:*\")[\"results\"]], [\"1\", \"2\", \"3\"]\n        )\n\n        # Restore.\n        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models\n\n    def test_spelling(self):\n        self.sb.update(self.smmi, self.sample_objs)\n\n        self.assertEqual(self.sb.search(\"Indx\")[\"hits\"], 0)\n        self.assertEqual(self.sb.search(\"indax\")[\"spelling_suggestion\"], \"index\")\n        self.assertEqual(\n            self.sb.search(\"Indx\", spelling_query=\"indexy\")[\"spelling_suggestion\"],\n            \"index\",\n        )\n\n    def test_spatial_search_parameters(self):\n        from django.contrib.gis.geos import Point\n\n        p1 = Point(1.23, 4.56)\n        kwargs = self.sb.build_search_kwargs(\n            \"*:*\",\n            distance_point={\"field\": \"location\", \"point\": p1},\n            sort_by=\"distance asc\",\n        )\n\n        # Points in Solr are lat, lon pairs but Django GIS Point() uses lon, lat so we'll check for the flip\n        # See https://django-haystack.readthedocs.io/en/latest/spatial.html#points\n        self.assertEqual(kwargs.get(\"pt\"), \"4.56,1.23\")\n        self.assertEqual(kwargs.get(\"sfield\"), \"location\")\n        self.assertEqual(kwargs.get(\"sort\"), \"geodist() asc\")\n\n    def test_altparser_query(self):\n        self.sb.update(self.smmi, self.sample_objs)\n\n        results = self.sb.search(\n            AltParser(\"dismax\", \"daniel1\", qf=\"name\", mm=1).prepare(self.sq)\n        )\n        
self.assertEqual(results[\"hits\"], 1)\n\n        # This should produce exactly the same result since all we have are mockmodel instances but we simply\n        # want to confirm that using the AltParser doesn't break other options:\n        results = self.sb.search(\n            AltParser(\"dismax\", \"daniel1\", qf=\"name\", mm=1).prepare(self.sq),\n            narrow_queries=set((\"django_ct:core.mockmodel\",)),\n        )\n        self.assertEqual(results[\"hits\"], 1)\n\n        results = self.sb.search(\n            AltParser(\"dismax\", \"+indexed +daniel1\", qf=\"text name\", mm=1).prepare(\n                self.sq\n            )\n        )\n        self.assertEqual(results[\"hits\"], 1)\n\n        self.sq.add_filter(SQ(name=AltParser(\"dismax\", \"daniel1\", qf=\"name\", mm=1)))\n        self.sq.add_filter(SQ(text=\"indexed\"))\n\n        new_q = self.sq._clone()\n        new_q._reset()\n\n        new_q.add_filter(SQ(name=\"daniel1\"))\n        new_q.add_filter(SQ(text=AltParser(\"dismax\", \"indexed\", qf=\"text\")))\n\n        results = new_q.get_results()\n        self.assertEqual(len(results), 1)\n        self.assertEqual(results[0].id, \"core.mockmodel.1\")\n\n    def test_raw_query(self):\n        self.sb.update(self.smmi, self.sample_objs)\n\n        # Ensure that the raw bits have proper parenthesis.\n        new_q = self.sq._clone()\n        new_q._reset()\n        new_q.add_filter(SQ(content=Raw(\"{!dismax qf='title^2 text' mm=1}my query\")))\n\n        results = new_q.get_results()\n        self.assertEqual(len(results), 0)\n\n    def test_altparser_quoting(self):\n        test_objs = [\n            MockModel(id=1, author=\"Foo d'Bar\", pub_date=datetime.date.today()),\n            MockModel(id=2, author=\"Baaz Quuz\", pub_date=datetime.date.today()),\n        ]\n        self.sb.update(SolrQuotingMockSearchIndex(), test_objs)\n\n        results = self.sb.search(\n            AltParser(\"dismax\", \"+don't +quuz\", qf=\"text\").prepare(self.sq)\n 
       )\n        self.assertEqual(results[\"hits\"], 1)\n\n    def test_more_like_this(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 3)\n\n        # A functional MLT example with enough data to work is below. Rely on\n        # this to ensure the API is correct enough.\n        self.assertEqual(self.sb.more_like_this(self.sample_objs[0])[\"hits\"], 0)\n        self.assertEqual(\n            [\n                result.pk\n                for result in self.sb.more_like_this(self.sample_objs[0])[\"results\"]\n            ],\n            [],\n        )\n\n    def test_build_schema(self):\n        old_ui = connections[\"solr\"].get_unified_index()\n\n        (content_field_name, fields) = self.sb.build_schema(old_ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n        self.assertEqual(len(fields), 4)\n        self.assertEqual(\n            sorted(fields, key=lambda x: x[\"field_name\"]),\n            [\n                {\n                    \"indexed\": \"true\",\n                    \"type\": \"text_en\",\n                    \"stored\": \"true\",\n                    \"field_name\": \"name\",\n                    \"multi_valued\": \"false\",\n                },\n                {\n                    \"indexed\": \"true\",\n                    \"field_name\": \"name_exact\",\n                    \"stored\": \"true\",\n                    \"type\": \"string\",\n                    \"multi_valued\": \"false\",\n                },\n                {\n                    \"indexed\": \"true\",\n                    \"type\": \"date\",\n                    \"stored\": \"true\",\n                    \"field_name\": \"pub_date\",\n                    \"multi_valued\": \"false\",\n                },\n                {\n                    \"indexed\": \"true\",\n                    \"type\": \"text_en\",\n                    \"stored\": \"true\",\n                    
\"field_name\": \"text\",\n                    \"multi_valued\": \"false\",\n                },\n            ],\n        )\n\n        ui = UnifiedIndex()\n        ui.build(indexes=[SolrComplexFacetsMockSearchIndex()])\n        (content_field_name, fields) = self.sb.build_schema(ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n        self.assertEqual(len(fields), 15)\n        fields = sorted(fields, key=lambda field: field[\"field_name\"])\n        self.assertEqual(\n            fields,\n            [\n                {\n                    \"field_name\": \"average_rating\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"float\",\n                },\n                {\n                    \"field_name\": \"average_rating_exact\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"float\",\n                },\n                {\n                    \"field_name\": \"created\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"date\",\n                },\n                {\n                    \"field_name\": \"created_exact\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"date\",\n                },\n                {\n                    \"field_name\": \"is_active\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"boolean\",\n                },\n                {\n                    \"field_name\": \"is_active_exact\",\n                
    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"boolean\",\n                },\n                {\n                    \"field_name\": \"name\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"text_en\",\n                },\n                {\n                    \"field_name\": \"name_exact\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"string\",\n                },\n                {\n                    \"field_name\": \"post_count\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"long\",\n                },\n                {\n                    \"field_name\": \"post_count_i\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"long\",\n                },\n                {\n                    \"field_name\": \"pub_date\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"date\",\n                },\n                {\n                    \"field_name\": \"pub_date_exact\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"date\",\n                },\n                {\n                    \"field_name\": \"sites\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"true\",\n                    \"stored\": 
\"true\",\n                    \"type\": \"text_en\",\n                },\n                {\n                    \"field_name\": \"sites_exact\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"true\",\n                    \"stored\": \"true\",\n                    \"type\": \"string\",\n                },\n                {\n                    \"field_name\": \"text\",\n                    \"indexed\": \"true\",\n                    \"multi_valued\": \"false\",\n                    \"stored\": \"true\",\n                    \"type\": \"text_en\",\n                },\n            ],\n        )\n\n    def test_verify_type(self):\n        old_ui = connections[\"solr\"].get_unified_index()\n        ui = UnifiedIndex()\n        smtmmi = SolrMaintainTypeMockSearchIndex()\n        ui.build(indexes=[smtmmi])\n        connections[\"solr\"]._index = ui\n        sb = connections[\"solr\"].get_backend()\n        sb.update(smtmmi, self.sample_objs)\n\n        self.assertEqual(sb.search(\"*:*\")[\"hits\"], 3)\n        self.assertEqual(\n            [result.month for result in sb.search(\"*:*\")[\"results\"]], [\"02\", \"02\", \"02\"]\n        )\n        connections[\"solr\"]._index = old_ui\n\n\nclass CaptureHandler(std_logging.Handler):\n    logs_seen = []\n\n    def emit(self, record):\n        CaptureHandler.logs_seen.append(record)\n\n\n@patch(\"pysolr.Solr._send_request\", side_effect=pysolr.SolrError)\n@patch(\"logging.Logger.log\")\nclass FailedSolrSearchBackendTestCase(TestCase):\n    def test_all_cases(self, mock_send_request, mock_log):\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n        # Setup the rest of the bits.\n        ui = UnifiedIndex()\n        smmi = 
SolrMockSearchIndex()\n        ui.build(indexes=[smmi])\n        connections[\"solr\"]._index = ui\n        sb = connections[\"solr\"].get_backend()\n\n        # Prior to the addition of the try/except bits, these would all fail miserably.\n        sb.update(smmi, self.sample_objs)\n        self.assertEqual(mock_log.call_count, 1)\n\n        sb.remove(self.sample_objs[0])\n        self.assertEqual(mock_log.call_count, 2)\n\n        sb.search(\"search\")\n        self.assertEqual(mock_log.call_count, 3)\n\n        sb.more_like_this(self.sample_objs[0])\n        self.assertEqual(mock_log.call_count, 4)\n\n        sb.clear([MockModel])\n        self.assertEqual(mock_log.call_count, 5)\n\n        sb.clear()\n        self.assertEqual(mock_log.call_count, 6)\n\n\nclass LiveSolrSearchQueryTestCase(TestCase):\n    fixtures = [\"base_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_solr_index()\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SolrMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"solr\"]._index = self.ui\n        self.sb = connections[\"solr\"].get_backend()\n        self.sq = connections[\"solr\"].get_query()\n\n        # Force indexing of the content.\n        self.smmi.update(\"solr\")\n\n    def tearDown(self):\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_get_spelling(self):\n        self.sq.add_filter(SQ(content=\"Indexy\"))\n\n        # Default collate + spelling path\n        self.assertEqual(self.sq.get_spelling_suggestion(), \"(index)\")\n        self.assertEqual(self.sq.get_spelling_suggestion(\"indexy\"), \"(index)\")\n\n        # Just spelling path\n        self.sq.run(spelling_query=\"Indexy\", collate=False)\n        self.assertEqual(self.sq._spelling_suggestion, \"index\")\n\n    def test_log_query(self):\n        
reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n\n        with self.settings(DEBUG=False):\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"solr\"].queries), 0)\n\n        with self.settings(DEBUG=True):\n            # Redefine it to clear out the cached results.\n            self.sq = connections[\"solr\"].get_query()\n            self.sq.add_filter(SQ(name=\"bar\"))\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"solr\"].queries), 1)\n            self.assertEqual(\n                connections[\"solr\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n\n            # And again, for good measure.\n            self.sq = connections[\"solr\"].get_query()\n            self.sq.add_filter(SQ(name=\"bar\"))\n            self.sq.add_filter(SQ(text=\"moof\"))\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"solr\"].queries), 2)\n            self.assertEqual(\n                connections[\"solr\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n            self.assertEqual(\n                connections[\"solr\"].queries[1][\"query_string\"],\n                \"(name:(bar) AND text:(moof))\",\n            )\n\n\n@override_settings(DEBUG=True)\nclass LiveSolrSearchQuerySetTestCase(TestCase):\n    \"\"\"Used to test actual implementation details of the SearchQuerySet.\"\"\"\n\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    @classmethod\n    def setUpClass(cls):\n        super(LiveSolrSearchQuerySetTestCase, cls).setUpClass()\n        cls._index_updated = False\n\n    @classmethod\n    def tearDownClass(cls):\n        del cls._index_updated\n        super(LiveSolrSearchQuerySetTestCase, cls).tearDownClass()\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        
self.smmi = SolrMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"solr\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"solr\")\n        self.rsqs = RelatedSearchQuerySet(\"solr\")\n\n        if not self._index_updated:\n            std_logging.info(\"Reindexing test data\")\n\n            # Wipe it clean.\n            clear_solr_index()\n\n            # Force indexing of the content.\n            self.smmi.update(\"solr\")\n\n            self._index_updated = True\n\n    def tearDown(self):\n        # Restore.\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_load_all(self):\n        sqs = self.sqs.load_all()\n        self.assertTrue(len(sqs) > 0)\n        # load_all should not change the results or their ordering:\n        self.assertListEqual([i.id for i in sqs], [i.id for i in self.sqs])\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.maxDiff = None\n        self.assertEqual(\n            sqs[0].object.foo,\n            \"Registering indexes in Haystack is very similar to registering models and ``ModelAdmin`` classes in the `Django admin site`_.  If you want to override the default indexing behavior for your model you can specify your own ``SearchIndex`` class.  This is useful for ensuring that future-dated or non-live content is not indexed and searchable. 
Our ``Note`` model has a ``pub_date`` field, so let's update our code to include our own ``SearchIndex`` to exclude indexing future-dated notes:\",\n        )\n\n    def test_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        sqs = self.sqs.all()\n        results = [int(result.pk) for result in iter(sqs)]\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"solr\"].queries), 3)\n\n    def test_slice(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = self.sqs.all()\n        self.assertEqual(\n            [int(result.pk) for result in results[1:11]],\n            [2, 3, 4, 5, 6, 7, 8, 9, 10, 11],\n        )\n        self.assertEqual(len(connections[\"solr\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = self.sqs.all()\n        self.assertEqual(int(results[21].pk), 22)\n        self.assertEqual(len(connections[\"solr\"].queries), 1)\n\n    def test_values_list_slice(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n\n        # TODO: this would be a good candidate for refactoring into a TestCase subclass shared across backends\n\n        # The values will come back as strings because Hasytack doesn't assume PKs are integers.\n        # We'll prepare this set once since we're going to query the same results in multiple ways:\n        expected_pks = [str(i) for i in [3, 2, 4, 5, 6, 7, 8, 9, 10, 11]]\n\n        results = self.sqs.all().order_by(\"pub_date\").values(\"pk\")\n        self.assertListEqual([i[\"pk\"] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\")\n        self.assertListEqual([i[0] for i in results[1:11]], expected_pks)\n\n        results = 
self.sqs.all().order_by(\"pub_date\").values_list(\"pk\", flat=True)\n        self.assertListEqual(results[1:11], expected_pks)\n\n        self.assertEqual(len(connections[\"solr\"].queries), 3)\n\n    def test_count(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        sqs = self.sqs.all()\n        self.assertEqual(sqs.count(), 23)\n        self.assertEqual(sqs.count(), 23)\n        self.assertEqual(len(sqs), 23)\n        self.assertEqual(sqs.count(), 23)\n        # Should only execute one query to count the length of the result set.\n        self.assertEqual(len(connections[\"solr\"].queries), 1)\n\n    def test_manual_iter(self):\n        results = self.sqs.all()\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = [int(result.pk) for result in results._manual_iter()]\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"solr\"].queries), 3)\n\n    def test_fill_cache(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = self.sqs.all()\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 10\n        )\n        self.assertEqual(len(connections[\"solr\"].queries), 1)\n        results._fill_cache(10, 20)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 20\n        )\n        self.assertEqual(len(connections[\"solr\"].queries), 2)\n\n    def test_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        self.assertEqual(self.sqs._cache_is_full(), False)\n        results = self.sqs.all()\n 
       fire_the_iterator_and_fill_cache = list(results)\n        self.assertEqual(23, len(fire_the_iterator_and_fill_cache))\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"solr\"].queries), 4)\n\n    def test___and__(self):\n        sqs1 = self.sqs.filter(content=\"foo\")\n        sqs2 = self.sqs.filter(content=\"bar\")\n        sqs = sqs1 & sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(sqs.query.build_query(), \"((foo) AND (bar))\")\n\n        # Now for something more complex...\n        sqs3 = self.sqs.exclude(title=\"moof\").filter(\n            SQ(content=\"foo\") | SQ(content=\"baz\")\n        )\n        sqs4 = self.sqs.filter(content=\"bar\")\n        sqs = sqs3 & sqs4\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 3)\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"(NOT (title:(moof)) AND ((foo) OR (baz)) AND (bar))\",\n        )\n\n    def test___or__(self):\n        sqs1 = self.sqs.filter(content=\"foo\")\n        sqs2 = self.sqs.filter(content=\"bar\")\n        sqs = sqs1 | sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(sqs.query.build_query(), \"((foo) OR (bar))\")\n\n        # Now for something more complex...\n        sqs3 = self.sqs.exclude(title=\"moof\").filter(\n            SQ(content=\"foo\") | SQ(content=\"baz\")\n        )\n        sqs4 = self.sqs.filter(content=\"bar\").models(MockModel)\n        sqs = sqs3 | sqs4\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"((NOT (title:(moof)) AND ((foo) OR (baz))) OR (bar))\",\n        )\n\n    def 
test_auto_query(self):\n        # Ensure bits in exact matches get escaped properly as well.\n        # This will break horrifically if escaping isn't working.\n        sqs = self.sqs.auto_query('\"pants:rule\"')\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter), '<SQ: AND content__content=\"pants:rule\">'\n        )\n        self.assertEqual(sqs.query.build_query(), '(\"pants\\\\:rule\")')\n        self.assertEqual(len(sqs), 0)\n\n        sqs = self.sqs.auto_query(\"Canon+PowerShot+ELPH+(Black)\")\n        self.assertEqual(\n            sqs.query.build_query(), \"Canon\\\\+PowerShot\\\\+ELPH\\\\+\\\\(Black\\\\)\"\n        )\n        sqs = sqs.filter(tags__in=[\"cameras\", \"electronics\"])\n        self.assertEqual(len(sqs), 0)\n\n    def test_query__in(self):\n        self.assertGreater(len(self.sqs), 0)\n        sqs = self.sqs.filter(django_ct=\"core.mockmodel\", django_id__in=[1, 2])\n        self.assertEqual(len(sqs), 2)\n\n    def test_query__in_empty_list(self):\n        \"\"\"Confirm that an empty list avoids a Solr exception\"\"\"\n        self.assertGreater(len(self.sqs), 0)\n        sqs = self.sqs.filter(id__in=[])\n        self.assertEqual(len(sqs), 0)\n\n    # Regressions\n\n    def test_regression_proper_start_offsets(self):\n        sqs = self.sqs.filter(text=\"index\")\n        self.assertNotEqual(sqs.count(), 0)\n\n        id_counts = {}\n\n        for item in sqs:\n            if item.id in id_counts:\n                id_counts[item.id] += 1\n            else:\n                id_counts[item.id] = 1\n\n        for key, value in id_counts.items():\n            if value > 1:\n                self.fail(\n                    \"Result with id '%s' seen more than once in the results.\" % key\n                )\n\n    def test_regression_raw_search_breaks_slicing(self):\n        sqs = self.sqs.raw_search(\"text: index\")\n        page_1 = [result.pk for result in sqs[0:10]]\n    
    page_2 = [result.pk for result in sqs[10:20]]\n\n        for pk in page_2:\n            if pk in page_1:\n                self.fail(\n                    \"Result with id '%s' seen more than once in the results.\" % pk\n                )\n\n    # RelatedSearchQuerySet Tests\n\n    def test_related_load_all(self):\n        sqs = self.rsqs.load_all()\n\n        # load_all should not change the results or their ordering:\n        self.assertListEqual([i.id for i in sqs], [i.id for i in self.rsqs])\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(len(sqs) > 0)\n\n        self.assertEqual(\n            sqs[0].object.foo,\n            \"Registering indexes in Haystack is very similar to registering models and ``ModelAdmin`` classes in the `Django admin site`_.  If you want to override the default indexing behavior for your model you can specify your own ``SearchIndex`` class.  This is useful for ensuring that future-dated or non-live content is not indexed and searchable. 
Our ``Note`` model has a ``pub_date`` field, so let's update our code to include our own ``SearchIndex`` to exclude indexing future-dated notes:\",\n        )\n\n    def test_related_load_all_queryset(self):\n        sqs = self.rsqs.load_all()\n\n        # load_all should not change the results or their ordering:\n        self.assertListEqual([i.id for i in sqs], [i.id for i in self.rsqs])\n\n        self.assertEqual(len(sqs._load_all_querysets), 0)\n\n        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=1))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs._load_all_querysets), 1)\n        self.assertEqual([obj.object.id for obj in sqs], list(range(2, 24)))\n\n        sqs = sqs.load_all_queryset(MockModel, MockModel.objects.filter(id__gt=10))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs._load_all_querysets), 1)\n        self.assertEqual([obj.object.id for obj in sqs], list(range(11, 24)))\n        self.assertEqual([obj.object.id for obj in sqs[10:20]], [21, 22, 23])\n\n    def test_related_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        sqs = self.rsqs.all()\n        results = [int(result.pk) for result in iter(sqs)]\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"solr\"].queries), 3)\n\n    def test_related_slice(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = self.rsqs.all()\n        self.assertEqual(\n            [int(result.pk) for result in results[1:11]],\n            [2, 3, 4, 5, 6, 7, 8, 9, 10, 11],\n        )\n        self.assertEqual(len(connections[\"solr\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = self.rsqs.all()\n        self.assertEqual(int(results[21].pk), 
22)\n        self.assertEqual(len(connections[\"solr\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = self.rsqs.all()\n        self.assertEqual([int(result.pk) for result in results[20:30]], [21, 22, 23])\n        self.assertEqual(len(connections[\"solr\"].queries), 1)\n\n    def test_related_manual_iter(self):\n        results = self.rsqs.all()\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = [int(result.pk) for result in results._manual_iter()]\n        self.assertEqual(results, list(range(1, 24)))\n        self.assertEqual(len(connections[\"solr\"].queries), 3)\n\n    def test_related_fill_cache(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results = self.rsqs.all()\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 10\n        )\n        self.assertEqual(len(connections[\"solr\"].queries), 1)\n        results._fill_cache(10, 20)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 20\n        )\n        self.assertEqual(len(connections[\"solr\"].queries), 2)\n\n    def test_related_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"solr\"].queries), 0)\n        self.assertEqual(self.rsqs._cache_is_full(), False)\n        results = self.rsqs.all()\n        fire_the_iterator_and_fill_cache = list(results)\n        self.assertEqual(23, len(fire_the_iterator_and_fill_cache))\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"solr\"].queries), 4)\n\n    def 
test_quotes_regression(self):\n        sqs = self.sqs.auto_query(\"44°48'40''N 20°28'32''E\")\n        # Should not have empty terms.\n        self.assertEqual(sqs.query.build_query(), \"(44\\xb048'40''N 20\\xb028'32''E)\")\n        # Should not cause Solr to 500.\n        try:\n            sqs.count()\n        except Exception as exc:\n            self.fail(\"raised unexpected error: %s\" % exc)\n\n        sqs = self.sqs.auto_query(\"blazing\")\n        self.assertEqual(sqs.query.build_query(), \"(blazing)\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"blazing saddles\")\n        self.assertEqual(sqs.query.build_query(), \"(blazing saddles)\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles')\n        self.assertEqual(sqs.query.build_query(), '(\\\\\"blazing saddles)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing \\'saddles\"')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing \\'saddles\")')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\")\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"'\")\n        self.assertEqual(sqs.query.build_query(), \"(mel \\\"blazing ''saddles\\\" ')\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query(\"mel \\\"blazing ''saddles\\\"'\\\"\")\n        self.assertEqual(sqs.query.build_query(), \"(mel 
\\\"blazing ''saddles\\\" '\\\\\\\")\")\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\" mel')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\" mel)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('\"blazing saddles\" mel brooks')\n        self.assertEqual(sqs.query.build_query(), '(\"blazing saddles\" mel brooks)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\" brooks')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\" brooks)')\n        self.assertEqual(sqs.count(), 0)\n        sqs = self.sqs.auto_query('mel \"blazing saddles\" \"brooks')\n        self.assertEqual(sqs.query.build_query(), '(mel \"blazing saddles\" \\\\\"brooks)')\n        self.assertEqual(sqs.count(), 0)\n\n    def test_query_generation(self):\n        sqs = self.sqs.filter(\n            SQ(content=AutoQuery(\"hello world\")) | SQ(title=AutoQuery(\"hello world\"))\n        )\n        self.assertEqual(\n            sqs.query.build_query(), \"((hello world) OR title:(hello world))\"\n        )\n\n    def test_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        sqs = self.sqs.all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n        # Custom class.\n        sqs = self.sqs.result_class(MockSearchResult).all()\n        self.assertTrue(isinstance(sqs[0], MockSearchResult))\n\n        # Reset to solr.\n        sqs = self.sqs.result_class(None).all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n\nclass LiveSolrMoreLikeThisTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_solr_index()\n\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SolrMockModelSearchIndex()\n   
     self.sammi = SolrAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.sammi])\n        connections[\"solr\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"solr\")\n\n        self.smmi.update(\"solr\")\n        self.sammi.update(\"solr\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_more_like_this(self):\n        all_mlt = self.sqs.more_like_this(MockModel.objects.get(pk=1))\n        self.assertEqual(\n            all_mlt.count(),\n            len([result.pk for result in all_mlt]),\n            msg=\"mlt SearchQuerySet .count() didn't match retrieved result length\",\n        )\n\n        # Rather than hard-code assumptions about Solr's return order, we have a few very similar\n        # items which we'll confirm are included in the first 5 results. This is still ugly as we're\n        # hard-coding primary keys but it's better than breaking any time a Solr update or data\n        # change causes a score to shift slightly\n        top_results = [int(result.pk) for result in all_mlt[:5]]\n        for i in (14, 6, 10, 4, 5):\n            self.assertIn(i, top_results)\n\n        filtered_mlt = self.sqs.filter(name=\"daniel3\").more_like_this(\n            MockModel.objects.get(pk=3)\n        )\n        self.assertLess(filtered_mlt.count(), all_mlt.count())\n        top_filtered_results = [int(result.pk) for result in filtered_mlt[:5]]\n\n        for i in (16, 17, 19, 13, 23):\n            self.assertIn(i, top_filtered_results)\n\n        mlt_filtered = self.sqs.more_like_this(MockModel.objects.get(pk=3)).filter(\n            name=\"daniel3\"\n        )\n        self.assertLess(mlt_filtered.count(), all_mlt.count())\n        top_mlt_filtered_pks = [int(result.pk) for result in mlt_filtered[:5]]\n\n        for i in (17, 16, 19, 23, 13):\n            self.assertIn(i, top_mlt_filtered_pks)\n\n        filtered_mlt_with_models = 
self.sqs.models(MockModel).more_like_this(\n            MockModel.objects.get(pk=1)\n        )\n        self.assertLessEqual(filtered_mlt_with_models.count(), all_mlt.count())\n        top_filtered_with_models = [\n            int(result.pk) for result in filtered_mlt_with_models[:5]\n        ]\n\n        for i in (14, 6, 4, 5, 10):\n            self.assertIn(i, top_filtered_with_models)\n\n    def test_more_like_this_defer(self):\n        mi = MockModel.objects.defer(\"foo\").get(pk=1)\n        deferred = self.sqs.models(MockModel).more_like_this(mi)\n        top_results = [int(result.pk) for result in deferred[:5]]\n\n        for i in (14, 6, 4, 5, 10):\n            self.assertIn(i, top_results)\n\n    def test_more_like_this_custom_result_class(self):\n        \"\"\"Ensure that swapping the ``result_class`` works\"\"\"\n        first_result = self.sqs.result_class(MockSearchResult).more_like_this(\n            MockModel.objects.get(pk=1)\n        )[0]\n        self.assertIsInstance(first_result, MockSearchResult)\n\n\nclass LiveSolrAutocompleteTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_solr_index()\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SolrAutocompleteMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"solr\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"solr\")\n\n        self.smmi.update(using=\"solr\")\n\n    def tearDown(self):\n        # Restore.\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_autocomplete(self):\n        autocomplete = self.sqs.autocomplete(text_auto=\"mod\")\n        self.assertEqual(autocomplete.count(), 5)\n        self.assertSetEqual(\n            set([result.pk for result in autocomplete]),\n            set([\"1\", 
\"12\", \"6\", \"7\", \"14\"]),\n        )\n        self.assertTrue(\"mod\" in autocomplete[0].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[1].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[2].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[3].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[4].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete]), 5)\n\n        # Test multiple words.\n        autocomplete_2 = self.sqs.autocomplete(text_auto=\"your mod\")\n        self.assertEqual(autocomplete_2.count(), 3)\n        self.assertSetEqual(\n            set([result.pk for result in autocomplete_2]), set([\"1\", \"14\", \"6\"])\n        )\n        self.assertTrue(\"your\" in autocomplete_2[0].text.lower())\n        self.assertTrue(\"mod\" in autocomplete_2[0].text.lower())\n        self.assertTrue(\"your\" in autocomplete_2[1].text.lower())\n        self.assertTrue(\"mod\" in autocomplete_2[1].text.lower())\n        self.assertTrue(\"your\" in autocomplete_2[2].text.lower())\n        self.assertTrue(\"mod\" in autocomplete_2[2].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete_2]), 3)\n\n        # Test multiple fields.\n        autocomplete_3 = self.sqs.autocomplete(text_auto=\"Django\", name_auto=\"dan\")\n        self.assertEqual(autocomplete_3.count(), 4)\n        self.assertSetEqual(\n            set([result.pk for result in autocomplete_3]), set([\"12\", \"1\", \"14\", \"22\"])\n        )\n        self.assertEqual(len([result.pk for result in autocomplete_3]), 4)\n\n\nclass LiveSolrRoundTripTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_solr_index()\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.srtsi = SolrRoundTripSearchIndex()\n        self.ui.build(indexes=[self.srtsi])\n        
connections[\"solr\"]._index = self.ui\n        self.sb = connections[\"solr\"].get_backend()\n\n        self.sqs = SearchQuerySet(\"solr\")\n\n        # Fake indexing.\n        mock = MockModel()\n        mock.id = 1\n        self.sb.update(self.srtsi, [mock])\n\n    def tearDown(self):\n        # Restore.\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_round_trip(self):\n        results = self.sqs.filter(id=\"core.mockmodel.1\")\n\n        # Sanity check.\n        self.assertEqual(results.count(), 1)\n\n        # Check the individual fields.\n        result = results[0]\n        self.assertEqual(result.id, \"core.mockmodel.1\")\n        self.assertEqual(result.text, \"This is some example text.\")\n        self.assertEqual(result.name, \"Mister Pants\")\n        self.assertEqual(result.is_active, True)\n        self.assertEqual(result.post_count, 25)\n        self.assertEqual(result.average_rating, 3.6)\n        self.assertEqual(result.price, \"24.99\")\n        self.assertEqual(result.pub_date, datetime.date(2009, 11, 21))\n        self.assertEqual(result.created, datetime.datetime(2009, 11, 21, 21, 31, 00))\n        self.assertEqual(result.tags, [\"staff\", \"outdoor\", \"activist\", \"scientist\"])\n        self.assertEqual(result.sites, [3, 5, 1])\n\n\nclass LiveSolrPickleTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        clear_solr_index()\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SolrMockModelSearchIndex()\n        self.sammi = SolrAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.sammi])\n        connections[\"solr\"]._index = self.ui\n\n        self.sqs = SearchQuerySet(\"solr\")\n\n        self.smmi.update(\"solr\")\n        self.sammi.update(\"solr\")\n\n    def 
tearDown(self):\n        # Restore.\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_pickling(self):\n        results = self.sqs.all()\n\n        for res in results:\n            # Make sure the cache is full.\n            pass\n\n        in_a_pickle = pickle.dumps(results)\n        like_a_cuke = pickle.loads(in_a_pickle)\n        self.assertEqual(len(like_a_cuke), len(results))\n        self.assertEqual(like_a_cuke[0].id, results[0].id)\n\n\nclass SolrBoostBackendTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Wipe it clean.\n        self.raw_solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"])\n        clear_solr_index()\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SolrBoostMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"solr\"]._index = self.ui\n        self.sb = connections[\"solr\"].get_backend()\n\n        self.sample_objs = []\n\n        for i in range(1, 5):\n            mock = AFourthMockModel()\n            mock.id = i\n\n            if i % 2:\n                mock.author = \"daniel\"\n                mock.editor = \"david\"\n            else:\n                mock.author = \"david\"\n                mock.editor = \"daniel\"\n\n            mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_boost(self):\n        self.sb.update(self.smmi, self.sample_objs)\n        self.assertEqual(self.raw_solr.search(\"*:*\").hits, 4)\n\n        results = SearchQuerySet(\"solr\").filter(\n            SQ(author=\"daniel\") | SQ(editor=\"daniel\")\n        )\n\n        self.assertEqual(\n            [result.id for result in results],\n            [\n                
\"core.afourthmockmodel.1\",\n                \"core.afourthmockmodel.3\",\n                \"core.afourthmockmodel.2\",\n                \"core.afourthmockmodel.4\",\n            ],\n        )\n\n\n@unittest.skipIf(\n    parse_version(pysolr.__version__) < parse_version(\"3.1.1\"),\n    \"content extraction requires pysolr > 3.1.1\",\n)\nclass LiveSolrContentExtractionTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        self.sb = connections[\"solr\"].get_backend()\n\n    def test_content_extraction(self):\n        f = open(\n            os.path.join(os.path.dirname(__file__), \"content_extraction\", \"test.pdf\"),\n            \"rb\",\n        )\n\n        data = self.sb.extract_file_contents(f)\n\n        self.assertTrue(\"haystack\" in data[\"contents\"])\n        self.assertEqual(data[\"metadata\"][\"Content-Type\"], [\"application/pdf\"])\n        self.assertTrue(any(i for i in data[\"metadata\"][\"Keywords\"] if \"SolrCell\" in i))\n"
  },
  {
    "path": "test_haystack/solr_tests/test_solr_management_commands.py",
    "content": "import datetime\nimport os\nfrom io import StringIO\nfrom tempfile import mkdtemp\nfrom unittest.mock import patch\n\nimport pysolr\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.management import call_command\nfrom django.core.management.base import CommandError\nfrom django.test import TestCase\n\nfrom haystack import connections, constants, indexes\nfrom haystack.utils.loading import UnifiedIndex\n\nfrom ..core.models import MockModel, MockTag\n\n\nclass SolrMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n    def get_updated_field(self):\n        return \"pub_date\"\n\n\nclass SolrMockTagSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, model_attr=\"name\")\n\n    def get_model(self):\n        return MockTag\n\n\nclass SolrMockSecretKeySearchIndex(indexes.SearchIndex, indexes.Indexable):\n    Th3S3cr3tK3y = indexes.CharField(document=True, model_attr=\"author\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass ManagementCommandTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n        self.solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"])\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SolrMockSearchIndex()\n        self.ui.build(indexes=[self.smmi])\n        connections[\"solr\"]._index = self.ui\n\n    def tearDown(self):\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def verify_indexed_documents(self):\n        \"\"\"Confirm that the documents 
in the search index match the database\"\"\"\n\n        res = self.solr.search(\"*:*\", fl=[\"id\"], rows=50)\n        self.assertEqual(res.hits, 23)\n\n        indexed_doc_ids = set(i[\"id\"] for i in res.docs)\n        expected_doc_ids = set(\n            \"core.mockmodel.%d\" % i\n            for i in MockModel.objects.values_list(\"pk\", flat=True)\n        )\n\n        self.assertSetEqual(indexed_doc_ids, expected_doc_ids)\n\n    def test_basic_commands(self):\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", verbosity=0, commit=False)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", verbosity=0)\n        self.verify_indexed_documents()\n\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"rebuild_index\", interactive=False, verbosity=0, commit=False)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"rebuild_index\", interactive=False, verbosity=0, commit=True)\n        self.verify_indexed_documents()\n\n        call_command(\"clear_index\", interactive=False, verbosity=0, commit=False)\n        self.verify_indexed_documents()\n\n    def test_remove(self):\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", verbosity=0)\n        self.verify_indexed_documents()\n\n        # Remove several instances, two of which will fit in the same block:\n        MockModel.objects.get(pk=1).delete()\n        MockModel.objects.get(pk=2).delete()\n        MockModel.objects.get(pk=8).delete()\n        self.assertEqual(self.solr.search(\"*:*\").hits, 23)\n\n        # Plain ``update_index`` doesn't fix it.\n        call_command(\"update_index\", 
verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 23)\n\n        # Remove without commit also doesn't affect queries:\n        call_command(\n            \"update_index\", remove=True, verbosity=0, batchsize=2, commit=False\n        )\n        self.assertEqual(self.solr.search(\"*:*\").hits, 23)\n\n        # … but remove with commit does:\n        call_command(\"update_index\", remove=True, verbosity=0, batchsize=2)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 20)\n\n    def test_age(self):\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        start = datetime.datetime.now() - datetime.timedelta(hours=3)\n        end = datetime.datetime.now()\n\n        mock = MockModel.objects.get(pk=1)\n        mock.pub_date = datetime.datetime.now() - datetime.timedelta(hours=2)\n        mock.save()\n        self.assertEqual(\n            MockModel.objects.filter(pub_date__range=(start, end)).count(), 1\n        )\n\n        call_command(\"update_index\", age=3, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 1)\n\n    def test_age_with_time_zones(self):\n        \"\"\"Haystack should use django.utils.timezone.now\"\"\"\n        from django.utils.timezone import now as django_now\n\n        from haystack.management.commands.update_index import now as haystack_now\n\n        self.assertIs(\n            haystack_now,\n            django_now,\n            msg=\"update_index should use django.utils.timezone.now\",\n        )\n\n        with patch(\"haystack.management.commands.update_index.now\") as m:\n            m.return_value = django_now()\n            self.test_age()\n            assert m.called\n\n    def test_dates(self):\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        start = datetime.datetime.now() - datetime.timedelta(hours=5, 
minutes=30)\n        end = datetime.datetime.now() - datetime.timedelta(hours=2)\n\n        mock_1 = MockModel.objects.get(pk=1)\n        mock_1.pub_date = datetime.datetime.now() - datetime.timedelta(\n            hours=5, minutes=1\n        )\n        mock_1.save()\n        mock_2 = MockModel.objects.get(pk=2)\n        mock_2.pub_date = datetime.datetime.now() - datetime.timedelta(hours=3)\n        mock_2.save()\n        mock_3 = MockModel.objects.get(pk=3)\n        mock_3.pub_date = datetime.datetime.now() - datetime.timedelta(hours=1)\n        mock_3.save()\n        self.assertEqual(\n            MockModel.objects.filter(pub_date__range=(start, end)).count(), 2\n        )\n\n        call_command(\n            \"update_index\",\n            start_date=start.isoformat(),\n            end_date=end.isoformat(),\n            verbosity=0,\n        )\n        self.assertEqual(self.solr.search(\"*:*\").hits, 2)\n\n    def test_multiprocessing(self):\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", verbosity=2, workers=2, batchsize=5)\n        self.verify_indexed_documents()\n\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", verbosity=2, workers=2, batchsize=5, commit=False)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n    def test_build_schema_wrong_backend(self):\n\n        settings.HAYSTACK_CONNECTIONS[\"whoosh\"] = {\n            \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n            \"PATH\": mkdtemp(prefix=\"dummy-path-\"),\n        }\n\n        connections[\"whoosh\"]._index = self.ui\n        self.assertRaises(\n            ImproperlyConfigured, call_command, \"build_solr_schema\", using=\"whoosh\"\n        )\n\n    def test_build_schema(self):\n\n        # Stow.\n        
oldhdf = constants.DOCUMENT_FIELD\n        oldui = connections[\"solr\"].get_unified_index()\n        oldurl = settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"]\n\n        try:\n            needle = \"Th3S3cr3tK3y\"\n            constants.DOCUMENT_FIELD = (\n                needle  # Force index to use new key for document_fields\n            )\n            settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"] = (\n                settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"].rsplit(\"/\", 1)[0]\n                + \"/mgmnt\"\n            )\n\n            ui = UnifiedIndex()\n            ui.build(indexes=[SolrMockSecretKeySearchIndex()])\n            connections[\"solr\"]._index = ui\n\n            rendered_file = StringIO()\n\n            script_dir = os.path.realpath(os.path.dirname(__file__))\n            conf_dir = os.path.join(\n                script_dir, \"server\", \"solr\", \"server\", \"solr\", \"mgmnt\", \"conf\"\n            )\n            schema_file = os.path.join(conf_dir, \"schema.xml\")\n            solrconfig_file = os.path.join(conf_dir, \"solrconfig.xml\")\n\n            self.assertTrue(\n                os.path.isdir(conf_dir), msg=\"Expected %s to be a directory\" % conf_dir\n            )\n\n            call_command(\"build_solr_schema\", using=\"solr\", stdout=rendered_file)\n            contents = rendered_file.getvalue()\n            self.assertGreater(contents.find('name=\"%s' % needle), -1)\n\n            call_command(\n                \"build_solr_schema\", using=\"solr\", configure_directory=conf_dir\n            )\n            with open(schema_file) as s:\n                self.assertGreater(s.read().find('name=\"%s' % needle), -1)\n            with open(solrconfig_file) as s:\n                self.assertGreater(s.read().find('name=\"df\">%s' % needle), -1)\n\n            self.assertTrue(\n                os.path.isfile(os.path.join(conf_dir, \"managed-schema.old\"))\n            )\n\n            call_command(\"build_solr_schema\", 
using=\"solr\", reload_core=True)\n\n            os.rename(schema_file, \"%s.bak\" % schema_file)\n            self.assertRaises(\n                CommandError,\n                call_command,\n                \"build_solr_schema\",\n                using=\"solr\",\n                reload_core=True,\n            )\n\n            call_command(\"build_solr_schema\", using=\"solr\", filename=schema_file)\n            with open(schema_file) as s:\n                self.assertGreater(s.read().find('name=\"%s' % needle), -1)\n        finally:\n            # reset\n            constants.DOCUMENT_FIELD = oldhdf\n            connections[\"solr\"]._index = oldui\n            settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"] = oldurl\n\n\nclass AppModelManagementCommandTestCase(TestCase):\n    fixtures = [\"base_data\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n        self.solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS[\"solr\"][\"URL\"])\n\n        # Stow.\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.smmi = SolrMockSearchIndex()\n        self.smtmi = SolrMockTagSearchIndex()\n        self.ui.build(indexes=[self.smmi, self.smtmi])\n        connections[\"solr\"]._index = self.ui\n\n    def tearDown(self):\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_app_model_variations(self):\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 25)\n\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", \"core\", verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 25)\n\n        call_command(\"clear_index\", 
interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        with self.assertRaises(ImproperlyConfigured):\n            call_command(\"update_index\", \"fake_app_thats_not_there\")\n\n        call_command(\"update_index\", \"core\", \"discovery\", verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 25)\n\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", \"discovery\", verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", \"core.MockModel\", verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 23)\n\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", \"core.MockTag\", verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 2)\n\n        call_command(\"clear_index\", interactive=False, verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 0)\n\n        call_command(\"update_index\", \"core.MockTag\", \"core.MockModel\", verbosity=0)\n        self.assertEqual(self.solr.search(\"*:*\").hits, 25)\n"
  },
  {
    "path": "test_haystack/solr_tests/test_solr_query.py",
    "content": "import datetime\n\nfrom django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.inputs import AltParser, Exact\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, SearchQuerySet\n\nfrom ..core.models import AnotherMockModel, MockModel\n\n\nclass SolrSearchQueryTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        super().setUp()\n        self.sq = connections[\"solr\"].get_query()\n\n    def test_build_query_all(self):\n        self.assertEqual(self.sq.build_query(), \"*:*\")\n\n    def test_build_query_single_word(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_build_query_boolean(self):\n        self.sq.add_filter(SQ(content=True))\n        self.assertEqual(self.sq.build_query(), \"(true)\")\n\n    def test_build_query_datetime(self):\n        self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28)))\n        self.assertEqual(self.sq.build_query(), \"(2009-05-08T11:28:00Z)\")\n\n    def test_build_query_multiple_words_and(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"((hello) AND (world))\")\n\n    def test_build_query_multiple_words_not(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) AND NOT ((world)))\")\n\n    def test_build_query_multiple_words_or(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"hello\"), use_or=True)\n        self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) OR (hello))\")\n\n    def test_build_query_multiple_words_mixed(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(content=\"hello\"), use_or=True)\n        
self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(\n            self.sq.build_query(), \"(((why) OR (hello)) AND NOT ((world)))\"\n        )\n\n    def test_build_query_phrase(self):\n        self.sq.add_filter(SQ(content=\"hello world\"))\n        self.assertEqual(self.sq.build_query(), \"(hello AND world)\")\n\n        self.sq.add_filter(SQ(content__exact=\"hello world\"))\n        self.assertEqual(\n            self.sq.build_query(), '((hello AND world) AND (\"hello world\"))'\n        )\n\n    def test_build_query_boost(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_boost(\"world\", 5)\n        self.assertEqual(self.sq.build_query(), \"(hello) world^5\")\n\n    def test_correct_exact(self):\n        self.sq.add_filter(SQ(content=Exact(\"hello world\")))\n        self.assertEqual(self.sq.build_query(), '(\"hello world\")')\n\n    def test_build_query_multiple_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=Exact(\"2009-02-10 01:59:00\")))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        self.sq.add_filter(SQ(created__lt=Exact(\"2009-02-12 12:13:00\")))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND pub_date:([* TO \"2009-02-10 01:59:00\"]) AND author:({\"daniel\" TO *}) AND created:({* TO \"2009-02-12 12:13:00\"}) AND title:([\"B\" TO *]) AND id:(\"1\" OR \"2\" OR \"3\") AND rating:([\"3\" TO \"5\"]))',\n        )\n\n    def test_build_complex_altparser_query(self):\n        self.sq.add_filter(SQ(content=AltParser(\"dismax\", \"Don't panic\", qf=\"text\")))\n        self.sq.add_filter(SQ(pub_date__lte=Exact(\"2009-02-10 01:59:00\")))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        
self.sq.add_filter(SQ(created__lt=Exact(\"2009-02-12 12:13:00\")))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        query = self.sq.build_query()\n        self.assertTrue('(_query_:\"{!dismax qf=text}Don\\'t panic\")' in query)\n        self.assertTrue('pub_date:([* TO \"2009-02-10 01:59:00\"])' in query)\n        self.assertTrue('author:({\"daniel\" TO *})' in query)\n        self.assertTrue('created:({* TO \"2009-02-12 12:13:00\"})' in query)\n        self.assertTrue('title:([\"B\" TO *])' in query)\n        self.assertTrue('id:(\"1\" OR \"2\" OR \"3\")' in query)\n        self.assertTrue('rating:([\"3\" TO \"5\"])' in query)\n\n    def test_build_query_multiple_filter_types_with_datetimes(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0)))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13, 0)))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND pub_date:([* TO \"2009-02-10T01:59:00Z\"]) AND author:({\"daniel\" TO *}) AND created:({* TO \"2009-02-12T12:13:00Z\"}) AND title:([\"B\" TO *]) AND id:(\"1\" OR \"2\" OR \"3\") AND rating:([\"3\" TO \"5\"]))',\n        )\n\n    def test_build_query_in_filter_multiple_words(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=[\"A Famous Paper\", \"An Infamous Article\"]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND title:(\"A Famous Paper\" OR \"An Infamous Article\"))',\n        )\n\n    def test_build_query_in_filter_datetime(self):\n        
self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))\n        self.assertEqual(\n            self.sq.build_query(), '((why) AND pub_date:(\"2009-07-06T01:56:21Z\"))'\n        )\n\n    def test_build_query_in_with_set(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=set([\"A Famous Paper\", \"An Infamous Article\"])))\n        query = self.sq.build_query()\n        self.assertTrue(\"(why)\" in query)\n\n        # Because ordering in Py3 is now random.\n        if 'title:(\"A ' in query:\n            self.assertTrue(\n                'title:(\"A Famous Paper\" OR \"An Infamous Article\")' in query\n            )\n        else:\n            self.assertTrue(\n                'title:(\"An Infamous Article\" OR \"A Famous Paper\")' in query\n            )\n\n    def test_build_query_with_contains(self):\n        self.sq.add_filter(SQ(content=\"circular\"))\n        self.sq.add_filter(SQ(title__contains=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((circular) AND title:(*haystack*))\")\n\n    def test_build_query_with_endswith(self):\n        self.sq.add_filter(SQ(content=\"circular\"))\n        self.sq.add_filter(SQ(title__endswith=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((circular) AND title:(*haystack))\")\n\n    def test_build_query_wildcard_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__startswith=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack*))\")\n\n    def test_build_query_fuzzy_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__fuzzy=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack~))\")\n\n    def test_clean(self):\n        self.assertEqual(self.sq.clean(\"hello world\"), \"hello world\")\n  
      self.assertEqual(self.sq.clean(\"hello AND world\"), \"hello and world\")\n        self.assertEqual(\n            self.sq.clean(\n                'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ \" ~ * ? : \\ / world'\n            ),\n            'hello and or not to \\\\+ \\\\- \\\\&& \\\\|| \\\\! \\\\( \\\\) \\\\{ \\\\} \\\\[ \\\\] \\\\^ \\\\\" \\\\~ \\\\* \\\\? \\\\: \\\\\\\\ \\\\/ world',\n        )\n        self.assertEqual(\n            self.sq.clean(\"so please NOTe i am in a bAND and bORed\"),\n            \"so please NOTe i am in a bAND and bORed\",\n        )\n\n    def test_build_query_with_models(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_model(MockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n        self.sq.add_model(AnotherMockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_set_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n        # Custom class.\n        class IttyBittyResult(object):\n            pass\n\n        self.sq.set_result_class(IttyBittyResult)\n        self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))\n\n        # Reset to default.\n        self.sq.set_result_class(None)\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n    def test_in_filter_values_list(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=MockModel.objects.values_list(\"id\", flat=True)))\n        self.assertEqual(self.sq.build_query(), '((why) AND title:(\"1\" OR \"2\" OR \"3\"))')\n\n    def test_narrow_sq(self):\n        sqs = SearchQuerySet(using=\"solr\").narrow(SQ(foo=\"moof\"))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.narrow_queries), 1)\n        self.assertEqual(sqs.query.narrow_queries.pop(), 
\"foo:(moof)\")\n\n    def test_query__in(self):\n        sqs = SearchQuerySet(using=\"solr\").filter(id__in=[1, 2, 3])\n        self.assertEqual(sqs.query.build_query(), 'id:(\"1\" OR \"2\" OR \"3\")')\n\n    def test_query__in_empty_list(self):\n        \"\"\"Confirm that an empty list avoids a Solr exception\"\"\"\n        sqs = SearchQuerySet(using=\"solr\").filter(id__in=[])\n        self.assertEqual(sqs.query.build_query(), \"id:(!*:*)\")\n"
  },
  {
    "path": "test_haystack/solr_tests/test_templatetags.py",
    "content": "import unittest\nfrom unittest.mock import call, patch\n\nfrom django.template import Context, Template\nfrom django.test import TestCase\n\nfrom ..core.models import MockModel\n\n\n@patch(\"haystack.templatetags.more_like_this.SearchQuerySet\")\nclass MoreLikeThisTagTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def render(self, template, context):\n        # Why on Earth does Django not have a TemplateTestCase yet?\n        t = Template(template)\n        c = Context(context)\n        return t.render(c)\n\n    def test_more_like_this_without_limit(self, mock_sqs):\n        mock_model = MockModel.objects.get(pk=3)\n        template = \"\"\"{% load more_like_this %}{% more_like_this entry as related_content %}{% for rc in related_content %}{{ rc.id }}{% endfor %}\"\"\"\n        context = {\"entry\": mock_model}\n\n        mlt = mock_sqs.return_value.more_like_this\n        mlt.return_value = [{\"id\": \"test_id\"}]\n\n        self.assertEqual(\"test_id\", self.render(template, context))\n\n        mlt.assert_called_once_with(mock_model)\n\n    def test_more_like_this_with_limit(self, mock_sqs):\n        mock_model = MockModel.objects.get(pk=3)\n        template = \"\"\"{% load more_like_this %}{% more_like_this entry as related_content limit 5 %}{% for rc in related_content %}{{ rc.id }}{% endfor %}\"\"\"\n        context = {\"entry\": mock_model}\n\n        mlt = mock_sqs.return_value.more_like_this\n        mlt.return_value.__getitem__.return_value = [{\"id\": \"test_id\"}]\n\n        self.assertEqual(\"test_id\", self.render(template, context))\n\n        mlt.assert_called_once_with(mock_model)\n\n        mock_sqs.assert_has_calls(\n            [\n                call().more_like_this(mock_model),\n                call().more_like_this().__getitem__(slice(None, 5)),\n            ],\n            any_order=True,\n        )\n\n    # FIXME: https://github.com/toastdriven/django-haystack/issues/1069\n    @unittest.expectedFailure\n    def 
test_more_like_this_for_model(self, mock_sqs):\n        mock_model = MockModel.objects.get(pk=3)\n        template = \"\"\"{% load more_like_this %}{% more_like_this entry as related_content for \"core.mock\" limit 5 %}{% for rc in related_content %}{{ rc.id }}{% endfor %}\"\"\"\n        context = {\"entry\": mock_model}\n\n        self.render(template, context)\n\n        mock_sqs.assert_has_calls(\n            [\n                call().models().more_like_this(mock_model),\n                call().models().more_like_this().__getitem__(slice(None, 5)),\n            ],\n            any_order=True,\n        )\n"
  },
  {
    "path": "test_haystack/spatial/__init__.py",
    "content": "from ..utils import check_solr\n\n\ndef setup():\n    check_solr()\n"
  },
  {
    "path": "test_haystack/spatial/fixtures/sample_spatial_data.json",
    "content": "[\n    {\n        \"pk\": 1,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.971955031423384,\n            \"longitude\": -95.23573637008667,\n            \"comment\": \"Man, I love the coffee at LPT!\",\n            \"created\": \"2011-12-13 09:12:23\"\n        }\n    },\n    {\n        \"pk\": 2,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.967667537449294,\n            \"longitude\": -95.23528575897217,\n            \"comment\": \"At the Pig for coffee. No one is here.\",\n            \"created\": \"2011-12-13 10:21:23\"\n        }\n    },\n    {\n        \"pk\": 3,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.971955031423384,\n            \"longitude\": -95.23573637008667,\n            \"comment\": \"Back to LPT's coffee.\",\n            \"created\": \"2011-12-14 14:53:23\"\n        }\n    },\n    {\n        \"pk\": 4,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.92776639117804,\n            \"longitude\": -95.2584171295166,\n            \"comment\": \"I hate the lines at the post office.\",\n            \"created\": \"2011-12-14 10:01:23\"\n        }\n    },\n    {\n        \"pk\": 5,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.96531514451104,\n            \"longitude\": -95.23622989654541,\n            \"comment\": \"ZOMGEncore!\",\n            \"created\": \"2011-12-14 12:30:23\"\n        }\n    },\n    {\n        \"pk\": 6,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.97110422641184,\n            \"longitude\": 
-95.23511409759521,\n            \"comment\": \"Trying a little Java Break coffee to get the day going.\",\n            \"created\": \"2011-12-15 08:44:23\"\n        }\n    },\n    {\n        \"pk\": 7,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.9128152,\n            \"longitude\": -94.6373083,\n            \"comment\": \"Apple Store! And they have coffee!\",\n            \"created\": \"2011-12-15 11:05:23\"\n        }\n    },\n    {\n        \"pk\": 8,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.97143787665407,\n            \"longitude\": -95.23622989654541,\n            \"comment\": \"4bucks coffee run. :/\",\n            \"created\": \"2011-12-16 10:10:23\"\n        }\n    },\n    {\n        \"pk\": 9,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.97080393984995,\n            \"longitude\": -95.23573637008667,\n            \"comment\": \"Time for lunch at Rudy's.\",\n            \"created\": \"2011-12-16 01:23:23\"\n        }\n    },\n    {\n        \"pk\": 10,\n        \"model\": \"spatial.checkin\",\n        \"fields\": {\n            \"username\": \"daniel\",\n            \"latitude\": 38.92588008485826,\n            \"longitude\": -95.2640175819397,\n            \"comment\": \"At Target. Again.\",\n            \"created\": \"2011-12-16 19:51:23\"\n        }\n    }\n]\n"
  },
  {
    "path": "test_haystack/spatial/models.py",
    "content": "import datetime\nfrom django.db import models\n\n\nclass Checkin(models.Model):\n    username = models.CharField(max_length=255)\n    # We're going to do some non-GeoDjango action, since the setup is\n    # complex enough. You could just as easily do:\n    #\n    #   location = models.PointField()\n    #\n    # ...and your ``search_indexes.py`` could be less complex.\n    latitude = models.FloatField()\n    longitude = models.FloatField()\n    comment = models.CharField(\n        max_length=140, blank=True, default=\"\", help_text=\"Say something pithy.\"\n    )\n    created = models.DateTimeField(default=datetime.datetime.now)\n\n    class Meta:\n        ordering = [\"-created\"]\n\n    # Again, with GeoDjango, this would be unnecessary.\n    def get_location(self):\n        # Nothing special about this Point, but ensure that we don't have to worry\n        # about import paths.\n        from django.contrib.gis.geos import Point\n\n        pnt = Point(self.longitude, self.latitude)\n        return pnt\n"
  },
  {
    "path": "test_haystack/spatial/search_indexes.py",
    "content": "from haystack import indexes\n\nfrom .models import Checkin\n\n\nclass CheckinSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    username = indexes.CharField(model_attr=\"username\")\n    comment = indexes.CharField(model_attr=\"comment\")\n    # Again, if you were using GeoDjango, this could be just:\n    #   location = indexes.LocationField(model_attr='location')\n    location = indexes.LocationField(model_attr=\"get_location\")\n    created = indexes.DateTimeField(model_attr=\"created\")\n\n    def get_model(self):\n        return Checkin\n\n    def prepare_text(self, obj):\n        # Because I don't feel like creating a template just for this.\n        return \"\\n\".join([obj.comment, obj.username])\n"
  },
  {
    "path": "test_haystack/spatial/test_spatial.py",
    "content": "from django.contrib.gis.measure import D\nfrom django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.exceptions import SpatialError\nfrom haystack.query import SearchQuerySet\nfrom haystack.utils.geo import (\n    ensure_distance,\n    ensure_geometry,\n    ensure_point,\n    ensure_wgs84,\n    generate_bounding_box,\n)\n\nfrom .models import Checkin\n\n\nclass SpatialUtilitiesTestCase(TestCase):\n    def test_ensure_geometry(self):\n        from django.contrib.gis.geos import GEOSGeometry, Point\n\n        self.assertRaises(\n            SpatialError, ensure_geometry, [38.97127105172941, -95.23592948913574]\n        )\n        ensure_geometry(GEOSGeometry(\"POLYGON((-95 38, -96 40, -97 42, -95 38))\"))\n        ensure_geometry(GEOSGeometry(\"POINT(-95.23592948913574 38.97127105172941)\"))\n        ensure_geometry(Point(-95.23592948913574, 38.97127105172941))\n\n    def test_ensure_point(self):\n        from django.contrib.gis.geos import GEOSGeometry, Point\n\n        self.assertRaises(\n            SpatialError, ensure_point, [38.97127105172941, -95.23592948913574]\n        )\n        self.assertRaises(\n            SpatialError,\n            ensure_point,\n            GEOSGeometry(\"POLYGON((-95 38, -96 40, -97 42, -95 38))\"),\n        )\n        ensure_point(Point(-95.23592948913574, 38.97127105172941))\n\n    def test_ensure_wgs84(self):\n        from django.contrib.gis.geos import GEOSGeometry, Point\n\n        self.assertRaises(\n            SpatialError,\n            ensure_wgs84,\n            GEOSGeometry(\"POLYGON((-95 38, -96 40, -97 42, -95 38))\"),\n        )\n\n        orig_pnt = Point(-95.23592948913574, 38.97127105172941)\n        std_pnt = ensure_wgs84(orig_pnt)\n        self.assertEqual(orig_pnt.srid, None)\n        self.assertEqual(std_pnt.srid, 4326)\n        self.assertEqual(std_pnt.x, -95.23592948913574)\n        self.assertEqual(std_pnt.y, 38.97127105172941)\n\n        orig_pnt = 
Point(-95.23592948913574, 38.97127105172941)\n        orig_pnt.srid = 2805\n        std_pnt = ensure_wgs84(orig_pnt)\n        self.assertEqual(orig_pnt.srid, 2805)\n        self.assertEqual(std_pnt.srid, 4326)\n        # These should be different, since it got transformed.\n        self.assertNotEqual(std_pnt.x, -95.23592948913574)\n        self.assertNotEqual(std_pnt.y, 38.97127105172941)\n\n    def test_ensure_distance(self):\n        self.assertRaises(\n            SpatialError, ensure_distance, [38.97127105172941, -95.23592948913574]\n        )\n        ensure_distance(D(mi=5))\n\n    def test_generate_bounding_box(self):\n        from django.contrib.gis.geos import Point\n\n        downtown_bottom_left = Point(-95.23947, 38.9637903)\n        downtown_top_right = Point(-95.23362278938293, 38.973081081164715)\n        ((min_lat, min_lng), (max_lat, max_lng)) = generate_bounding_box(\n            downtown_bottom_left, downtown_top_right\n        )\n        self.assertEqual(min_lat, 38.9637903)\n        self.assertEqual(min_lng, -95.23947)\n        self.assertEqual(max_lat, 38.973081081164715)\n        self.assertEqual(max_lng, -95.23362278938293)\n\n    def test_generate_bounding_box_crossing_line_date(self):\n        from django.contrib.gis.geos import Point\n\n        downtown_bottom_left = Point(95.23947, 38.9637903)\n        downtown_top_right = Point(-95.23362278938293, 38.973081081164715)\n        ((south, west), (north, east)) = generate_bounding_box(\n            downtown_bottom_left, downtown_top_right\n        )\n        self.assertEqual(south, 38.9637903)\n        self.assertEqual(west, 95.23947)\n        self.assertEqual(north, 38.973081081164715)\n        self.assertEqual(east, -95.23362278938293)\n\n\nclass SpatialSolrTestCase(TestCase):\n    fixtures = [\"sample_spatial_data.json\"]\n    using = \"solr\"\n\n    def setUp(self):\n        from django.contrib.gis.geos import Point\n\n        super().setUp()\n        self.ui = 
connections[self.using].get_unified_index()\n        self.checkindex = self.ui.get_index(Checkin)\n        self.checkindex.reindex(using=self.using)\n        self.sqs = SearchQuerySet().using(self.using)\n\n        self.downtown_pnt = Point(-95.23592948913574, 38.97127105172941)\n        self.downtown_bottom_left = Point(-95.23947, 38.9637903)\n        self.downtown_top_right = Point(-95.23362278938293, 38.973081081164715)\n        self.lawrence_bottom_left = Point(-95.345535, 39.002643)\n        self.lawrence_top_right = Point(-95.202713, 38.923626)\n\n    def tearDown(self):\n        self.checkindex.clear(using=self.using)\n        super().setUp()\n\n    def test_indexing(self):\n        # Make sure the indexed data looks correct.\n        first = Checkin.objects.get(pk=1)\n        sqs = self.sqs.models(Checkin).filter(django_id=first.pk)\n        self.assertEqual(sqs.count(), 1)\n        self.assertEqual(sqs[0].username, first.username)\n        # Make sure we've got a proper ``Point`` object.\n        self.assertAlmostEqual(sqs[0].location.coords[0], first.longitude)\n        self.assertAlmostEqual(sqs[0].location.coords[1], first.latitude)\n\n        # Double-check, to make sure there was nothing accidentally copied\n        # between instances.\n        second = Checkin.objects.get(pk=2)\n        self.assertNotEqual(second.latitude, first.latitude)\n        sqs = self.sqs.models(Checkin).filter(django_id=second.pk)\n        self.assertEqual(sqs.count(), 1)\n        self.assertEqual(sqs[0].username, second.username)\n        self.assertAlmostEqual(sqs[0].location.coords[0], second.longitude)\n        self.assertAlmostEqual(sqs[0].location.coords[1], second.latitude)\n\n    def test_within(self):\n        self.assertEqual(self.sqs.all().count(), 10)\n\n        sqs = self.sqs.within(\n            \"location\", self.downtown_bottom_left, self.downtown_top_right\n        )\n        self.assertEqual(sqs.count(), 7)\n\n        sqs = self.sqs.within(\n            
\"location\", self.lawrence_bottom_left, self.lawrence_top_right\n        )\n        self.assertEqual(sqs.count(), 9)\n\n    def test_dwithin(self):\n        self.assertEqual(self.sqs.all().count(), 10)\n\n        sqs = self.sqs.dwithin(\"location\", self.downtown_pnt, D(mi=0.1))\n        self.assertEqual(sqs.count(), 5)\n\n        sqs = self.sqs.dwithin(\"location\", self.downtown_pnt, D(mi=0.5))\n        self.assertEqual(sqs.count(), 7)\n\n        sqs = self.sqs.dwithin(\"location\", self.downtown_pnt, D(mi=100))\n        self.assertEqual(sqs.count(), 10)\n\n    def test_distance_added(self):\n        sqs = self.sqs.within(\n            \"location\", self.downtown_bottom_left, self.downtown_top_right\n        ).distance(\"location\", self.downtown_pnt)\n        self.assertEqual(sqs.count(), 7)\n        self.assertAlmostEqual(sqs[0].distance.mi, 0.01985226)\n        self.assertAlmostEqual(sqs[1].distance.mi, 0.03385863)\n        self.assertAlmostEqual(sqs[2].distance.mi, 0.04539100)\n        self.assertAlmostEqual(sqs[3].distance.mi, 0.04831436)\n        self.assertAlmostEqual(sqs[4].distance.mi, 0.41116546)\n        self.assertAlmostEqual(sqs[5].distance.mi, 0.25098114)\n        self.assertAlmostEqual(sqs[6].distance.mi, 0.04831436)\n\n        sqs = self.sqs.dwithin(\"location\", self.downtown_pnt, D(mi=0.1)).distance(\n            \"location\", self.downtown_pnt\n        )\n        self.assertEqual(sqs.count(), 5)\n        self.assertAlmostEqual(sqs[0].distance.mi, 0.01985226)\n        self.assertAlmostEqual(sqs[1].distance.mi, 0.03385863)\n        self.assertAlmostEqual(sqs[2].distance.mi, 0.04539100)\n        self.assertAlmostEqual(sqs[3].distance.mi, 0.04831436)\n        self.assertAlmostEqual(sqs[4].distance.mi, 0.04831436)\n\n    def test_order_by_distance(self):\n        sqs = (\n            self.sqs.within(\n                \"location\", self.downtown_bottom_left, self.downtown_top_right\n            )\n            .distance(\"location\", 
self.downtown_pnt)\n            .order_by(\"distance\")\n        )\n        self.assertEqual(sqs.count(), 7)\n        self.assertEqual(\n            [result.pk for result in sqs], [\"8\", \"9\", \"6\", \"3\", \"1\", \"2\", \"5\"]\n        )\n        self.assertEqual(\n            [\"%0.04f\" % result.distance.mi for result in sqs],\n            [\"0.0199\", \"0.0339\", \"0.0454\", \"0.0483\", \"0.0483\", \"0.2510\", \"0.4112\"],\n        )\n\n        sqs = (\n            self.sqs.dwithin(\"location\", self.downtown_pnt, D(mi=0.1))\n            .distance(\"location\", self.downtown_pnt)\n            .order_by(\"distance\")\n        )\n        self.assertEqual(sqs.count(), 5)\n        self.assertEqual([result.pk for result in sqs], [\"8\", \"9\", \"6\", \"3\", \"1\"])\n        self.assertEqual(\n            [\"%0.04f\" % result.distance.mi for result in sqs],\n            [\"0.0199\", \"0.0339\", \"0.0454\", \"0.0483\", \"0.0483\"],\n        )\n\n        sqs = (\n            self.sqs.dwithin(\"location\", self.downtown_pnt, D(mi=0.1))\n            .distance(\"location\", self.downtown_pnt)\n            .order_by(\"-distance\")\n        )\n        self.assertEqual(sqs.count(), 5)\n        self.assertEqual([result.pk for result in sqs], [\"3\", \"1\", \"6\", \"9\", \"8\"])\n        self.assertEqual(\n            [\"%0.04f\" % result.distance.mi for result in sqs],\n            [\"0.0483\", \"0.0483\", \"0.0454\", \"0.0339\", \"0.0199\"],\n        )\n\n    def test_complex(self):\n        sqs = (\n            self.sqs.auto_query(\"coffee\")\n            .within(\"location\", self.downtown_bottom_left, self.downtown_top_right)\n            .distance(\"location\", self.downtown_pnt)\n            .order_by(\"distance\")\n        )\n        self.assertEqual(sqs.count(), 5)\n        self.assertEqual([result.pk for result in sqs], [\"8\", \"6\", \"3\", \"1\", \"2\"])\n        self.assertEqual(\n            [\"%0.04f\" % result.distance.mi for result in sqs],\n            
[\"0.0199\", \"0.0454\", \"0.0483\", \"0.0483\", \"0.2510\"],\n        )\n\n        sqs = (\n            self.sqs.auto_query(\"coffee\")\n            .dwithin(\"location\", self.downtown_pnt, D(mi=0.1))\n            .distance(\"location\", self.downtown_pnt)\n            .order_by(\"distance\")\n        )\n        self.assertEqual(sqs.count(), 4)\n        self.assertEqual([result.pk for result in sqs], [\"8\", \"6\", \"3\", \"1\"])\n        self.assertEqual(\n            [\"%0.04f\" % result.distance.mi for result in sqs],\n            [\"0.0199\", \"0.0454\", \"0.0483\", \"0.0483\"],\n        )\n\n        sqs = (\n            self.sqs.auto_query(\"coffee\")\n            .dwithin(\"location\", self.downtown_pnt, D(mi=0.1))\n            .distance(\"location\", self.downtown_pnt)\n            .order_by(\"-distance\")\n        )\n        self.assertEqual(sqs.count(), 4)\n        self.assertEqual([result.pk for result in sqs], [\"3\", \"1\", \"6\", \"8\"])\n        self.assertEqual(\n            [\"%0.04f\" % result.distance.mi for result in sqs],\n            [\"0.0483\", \"0.0483\", \"0.0454\", \"0.0199\"],\n        )\n\n        sqs = (\n            self.sqs.auto_query(\"coffee\")\n            .within(\"location\", self.downtown_bottom_left, self.downtown_top_right)\n            .distance(\"location\", self.downtown_pnt)\n            .order_by(\"-created\")\n        )\n        self.assertEqual(sqs.count(), 5)\n        self.assertEqual([result.pk for result in sqs], [\"8\", \"6\", \"3\", \"2\", \"1\"])\n\n        sqs = (\n            self.sqs.auto_query(\"coffee\")\n            .dwithin(\"location\", self.downtown_pnt, D(mi=0.1))\n            .distance(\"location\", self.downtown_pnt)\n            .order_by(\"-created\")\n        )\n        self.assertEqual(sqs.count(), 4)\n        self.assertEqual([result.pk for result in sqs], [\"8\", \"6\", \"3\", \"1\"])\n"
  },
  {
    "path": "test_haystack/test_altered_internal_names.py",
    "content": "from django.conf import settings\nfrom django.test import TestCase\n\nfrom haystack import connection_router, connections, constants, indexes\nfrom haystack.management.commands.build_solr_schema import Command\nfrom haystack.query import SQ\nfrom haystack.utils.loading import UnifiedIndex\nfrom test_haystack.core.models import AnotherMockModel, MockModel\nfrom test_haystack.utils import check_solr\n\n\nclass MockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass AlteredInternalNamesTestCase(TestCase):\n    def setUp(self):\n        check_solr()\n        super().setUp()\n\n        self.old_ui = connections[\"solr\"].get_unified_index()\n        ui = UnifiedIndex()\n        ui.build(indexes=[MockModelSearchIndex()])\n        connections[\"solr\"]._index = ui\n\n        constants.ID = \"my_id\"\n        constants.DJANGO_CT = \"my_django_ct\"\n        constants.DJANGO_ID = \"my_django_id\"\n\n    def tearDown(self):\n        constants.ID = \"id\"\n        constants.DJANGO_CT = \"django_ct\"\n        constants.DJANGO_ID = \"django_id\"\n        connections[\"solr\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_altered_names(self):\n        sq = connections[\"solr\"].get_query()\n\n        sq.add_filter(SQ(content=\"hello\"))\n        sq.add_model(MockModel)\n        self.assertEqual(sq.build_query(), \"(hello)\")\n\n        sq.add_model(AnotherMockModel)\n        self.assertEqual(sq.build_query(), \"(hello)\")\n\n    def test_solr_schema(self):\n        command = Command()\n        context_data = command.build_context(using=\"solr\")\n        self.assertEqual(len(context_data), 6)\n        self.assertEqual(context_data[\"DJANGO_ID\"], \"my_django_id\")\n        
self.assertEqual(context_data[\"content_field_name\"], \"text\")\n        self.assertEqual(context_data[\"DJANGO_CT\"], \"my_django_ct\")\n        self.assertEqual(context_data[\"default_operator\"], \"AND\")\n        self.assertEqual(context_data[\"ID\"], \"my_id\")\n        self.assertEqual(len(context_data[\"fields\"]), 3)\n        self.assertEqual(\n            sorted(context_data[\"fields\"], key=lambda x: x[\"field_name\"]),\n            [\n                {\n                    \"indexed\": \"true\",\n                    \"type\": \"text_en\",\n                    \"stored\": \"true\",\n                    \"field_name\": \"name\",\n                    \"multi_valued\": \"false\",\n                },\n                {\n                    \"indexed\": \"true\",\n                    \"type\": \"date\",\n                    \"stored\": \"true\",\n                    \"field_name\": \"pub_date\",\n                    \"multi_valued\": \"false\",\n                },\n                {\n                    \"indexed\": \"true\",\n                    \"type\": \"text_en\",\n                    \"stored\": \"true\",\n                    \"field_name\": \"text\",\n                    \"multi_valued\": \"false\",\n                },\n            ],\n        )\n\n        schema_xml = command.build_template(using=\"solr\")\n        self.assertTrue(\"<uniqueKey>my_id</uniqueKey>\" in schema_xml)\n        self.assertTrue(\n            '<field name=\"my_id\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\" required=\"true\"/>'\n            in schema_xml\n        )\n        self.assertTrue(\n            '<field name=\"my_django_ct\" type=\"string\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>'\n            in schema_xml\n        )\n"
  },
  {
    "path": "test_haystack/test_app_loading.py",
    "content": "from types import GeneratorType, ModuleType\n\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom haystack.utils import app_loading\n\n\nclass AppLoadingTests(TestCase):\n    def test_load_apps(self):\n        apps = app_loading.haystack_load_apps()\n        self.assertIsInstance(apps, (list, GeneratorType))\n\n        self.assertIn(\"hierarchal_app_django\", apps)\n\n        self.assertNotIn(\n            \"test_app_without_models\",\n            apps,\n            msg=\"haystack_load_apps should exclude apps without defined models\",\n        )\n\n    def test_get_app_modules(self):\n        app_modules = app_loading.haystack_get_app_modules()\n        self.assertIsInstance(app_modules, (list, GeneratorType))\n\n        for i in app_modules:\n            self.assertIsInstance(i, ModuleType)\n\n    def test_get_models_all(self):\n        models = app_loading.haystack_get_models(\"core\")\n        self.assertIsInstance(models, (list, GeneratorType))\n\n    def test_get_models_specific(self):\n        from test_haystack.core.models import MockModel\n\n        models = app_loading.haystack_get_models(\"core.MockModel\")\n        self.assertIsInstance(models, (list, GeneratorType))\n        self.assertListEqual(models, [MockModel])\n\n    def test_hierarchal_app_get_models(self):\n        models = app_loading.haystack_get_models(\"hierarchal_app_django\")\n        self.assertIsInstance(models, (list, GeneratorType))\n        self.assertSetEqual(\n            set(str(i._meta) for i in models),\n            set(\n                (\n                    \"hierarchal_app_django.hierarchalappsecondmodel\",\n                    \"hierarchal_app_django.hierarchalappmodel\",\n                )\n            ),\n        )\n\n    def test_hierarchal_app_specific_model(self):\n        models = app_loading.haystack_get_models(\n            \"hierarchal_app_django.HierarchalAppModel\"\n        )\n        self.assertIsInstance(models, (list, 
GeneratorType))\n        self.assertSetEqual(\n            set(str(i._meta) for i in models),\n            set((\"hierarchal_app_django.hierarchalappmodel\",)),\n        )\n\n\nclass AppWithoutModelsTests(TestCase):\n    # Confirm that everything works if an app without models is enabled\n\n    def test_simple_view(self):\n        url = reverse(\"app-without-models:simple-view\")\n        resp = self.client.get(url)\n        self.assertEqual(resp.content.decode(\"utf-8\"), \"OK\")\n"
  },
  {
    "path": "test_haystack/test_app_using_appconfig/__init__.py",
    "content": "default_app_config = \"test_app_using_appconfig.apps.SimpleTestAppConfig\"\n"
  },
  {
    "path": "test_haystack/test_app_using_appconfig/apps.py",
    "content": "from django.apps import AppConfig\n\n\nclass SimpleTestAppConfig(AppConfig):\n    name = \"test_haystack.test_app_using_appconfig\"\n    verbose_name = \"Simple test app using AppConfig\"\n"
  },
  {
    "path": "test_haystack/test_app_using_appconfig/migrations/0001_initial.py",
    "content": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = []\n\n    operations = [\n        migrations.CreateModel(\n            name=\"MicroBlogPost\",\n            fields=[\n                (\n                    \"id\",\n                    models.AutoField(\n                        verbose_name=\"ID\",\n                        serialize=False,\n                        auto_created=True,\n                        primary_key=True,\n                    ),\n                ),\n                (\"text\", models.CharField(max_length=140)),\n            ],\n            options={},\n            bases=(models.Model,),\n        )\n    ]\n"
  },
  {
    "path": "test_haystack/test_app_using_appconfig/migrations/__init__.py",
    "content": ""
  },
  {
    "path": "test_haystack/test_app_using_appconfig/models.py",
    "content": "from django.db.models import CharField, Model\n\n\nclass MicroBlogPost(Model):\n    text = CharField(max_length=140)\n"
  },
  {
    "path": "test_haystack/test_app_using_appconfig/search_indexes.py",
    "content": "from haystack import indexes\n\nfrom .models import MicroBlogPost\n\n\nclass MicroBlogSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=False, model_attr=\"text\")\n\n    def get_model(self):\n        return MicroBlogPost\n"
  },
  {
    "path": "test_haystack/test_app_using_appconfig/tests.py",
    "content": "from django.test import TestCase\n\nfrom .models import MicroBlogPost\n\n\nclass AppConfigTests(TestCase):\n    def test_index_collection(self):\n        from haystack import connections\n\n        unified_index = connections[\"default\"].get_unified_index()\n        models = unified_index.get_indexed_models()\n\n        self.assertIn(MicroBlogPost, models)\n"
  },
  {
    "path": "test_haystack/test_app_with_hierarchy/__init__.py",
    "content": "\"\"\"Test app with multiple hierarchy levels above the actual models.py file\"\"\"\n"
  },
  {
    "path": "test_haystack/test_app_with_hierarchy/contrib/__init__.py",
    "content": ""
  },
  {
    "path": "test_haystack/test_app_with_hierarchy/contrib/django/__init__.py",
    "content": ""
  },
  {
    "path": "test_haystack/test_app_with_hierarchy/contrib/django/hierarchal_app_django/__init__.py",
    "content": ""
  },
  {
    "path": "test_haystack/test_app_with_hierarchy/contrib/django/hierarchal_app_django/models.py",
    "content": "from django.db.models import BooleanField, CharField, Model\n\n\nclass HierarchalAppModel(Model):\n    enabled = BooleanField(default=True)\n\n\nclass HierarchalAppSecondModel(Model):\n    title = CharField(max_length=16)\n"
  },
  {
    "path": "test_haystack/test_app_without_models/__init__.py",
    "content": ""
  },
  {
    "path": "test_haystack/test_app_without_models/urls.py",
    "content": "from django.urls import path\n\nfrom .views import simple_view\n\nurlpatterns = [path(\"simple-view\", simple_view, name=\"simple-view\")]\n"
  },
  {
    "path": "test_haystack/test_app_without_models/views.py",
    "content": "from django.http import HttpResponse\n\n\ndef simple_view(request):\n    return HttpResponse(\"OK\")\n"
  },
  {
    "path": "test_haystack/test_backends.py",
    "content": "import warnings\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.test import TestCase\n\nfrom haystack.utils import loading\n\n\nclass LoadBackendTestCase(TestCase):\n    def test_load_solr(self):\n        try:\n            import pysolr\n        except ImportError:\n            warnings.warn(\n                \"Pysolr doesn't appear to be installed. Unable to test loading the Solr backend.\"\n            )\n            return\n\n        backend = loading.load_backend(\"haystack.backends.solr_backend.SolrEngine\")\n        self.assertEqual(backend.__name__, \"SolrEngine\")\n\n    def test_load_whoosh(self):\n        try:\n            import whoosh\n        except ImportError:\n            warnings.warn(\n                \"Whoosh doesn't appear to be installed. Unable to test loading the Whoosh backend.\"\n            )\n            return\n\n        backend = loading.load_backend(\"haystack.backends.whoosh_backend.WhooshEngine\")\n        self.assertEqual(backend.__name__, \"WhooshEngine\")\n\n    def test_load_elasticsearch(self):\n        try:\n            import elasticsearch\n        except ImportError:\n            warnings.warn(\n                \"elasticsearch-py doesn't appear to be installed. 
Unable to test loading the ElasticSearch backend.\"\n            )\n            return\n\n        backend = loading.load_backend(\n            \"haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine\"\n        )\n        self.assertEqual(backend.__name__, \"ElasticsearchSearchEngine\")\n\n    def test_load_simple(self):\n        backend = loading.load_backend(\"haystack.backends.simple_backend.SimpleEngine\")\n        self.assertEqual(backend.__name__, \"SimpleEngine\")\n\n    def test_load_nonexistent(self):\n        try:\n            backend = loading.load_backend(\"foobar\")\n            self.fail()\n        except ImproperlyConfigured as e:\n            self.assertEqual(\n                str(e),\n                \"The provided backend 'foobar' is not a complete Python path to a BaseEngine subclass.\",\n            )\n\n        try:\n            backend = loading.load_backend(\"foobar.FooEngine\")\n            self.fail()\n        except ImportError as e:\n            pass\n\n        try:\n            backend = loading.load_backend(\"haystack.backends.simple_backend.FooEngine\")\n            self.fail()\n        except ImportError as e:\n            self.assertEqual(\n                str(e),\n                \"The Python module 'haystack.backends.simple_backend' has no 'FooEngine' class.\",\n            )\n"
  },
  {
    "path": "test_haystack/test_discovery.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.utils.loading import UnifiedIndex\nfrom test_haystack.discovery.search_indexes import FooIndex\n\nEXPECTED_INDEX_MODEL_COUNT = 6\n\n\nclass ManualDiscoveryTestCase(TestCase):\n    def test_discovery(self):\n        old_ui = connections[\"default\"].get_unified_index()\n        connections[\"default\"]._index = UnifiedIndex()\n        ui = connections[\"default\"].get_unified_index()\n        self.assertEqual(len(ui.get_indexed_models()), EXPECTED_INDEX_MODEL_COUNT)\n\n        ui.build(indexes=[FooIndex()])\n\n        self.assertListEqual(\n            [\"discovery.foo\"], [str(i._meta) for i in ui.get_indexed_models()]\n        )\n\n        ui.build(indexes=[])\n\n        self.assertListEqual([], ui.get_indexed_models())\n        connections[\"default\"]._index = old_ui\n\n\nclass AutomaticDiscoveryTestCase(TestCase):\n    def test_discovery(self):\n        old_ui = connections[\"default\"].get_unified_index()\n        connections[\"default\"]._index = UnifiedIndex()\n        ui = connections[\"default\"].get_unified_index()\n        self.assertEqual(len(ui.get_indexed_models()), EXPECTED_INDEX_MODEL_COUNT)\n\n        # Test exclusions.\n        ui.excluded_indexes = [\"test_haystack.discovery.search_indexes.BarIndex\"]\n        ui.build()\n\n        indexed_model_names = [str(i._meta) for i in ui.get_indexed_models()]\n        self.assertIn(\"multipleindex.foo\", indexed_model_names)\n        self.assertIn(\"multipleindex.bar\", indexed_model_names)\n        self.assertNotIn(\"discovery.bar\", indexed_model_names)\n\n        ui.excluded_indexes = [\n            \"test_haystack.discovery.search_indexes.BarIndex\",\n            \"test_haystack.discovery.search_indexes.FooIndex\",\n        ]\n        ui.build()\n\n        indexed_model_names = [str(i._meta) for i in ui.get_indexed_models()]\n        self.assertIn(\"multipleindex.foo\", indexed_model_names)\n        
self.assertIn(\"multipleindex.bar\", indexed_model_names)\n        self.assertListEqual(\n            [], [i for i in indexed_model_names if i.startswith(\"discovery\")]\n        )\n        connections[\"default\"]._index = old_ui\n"
  },
  {
    "path": "test_haystack/test_fields.py",
    "content": "import datetime\nfrom decimal import Decimal\nfrom unittest.mock import Mock\n\nfrom django.template import TemplateDoesNotExist\nfrom django.test import TestCase\n\nfrom haystack.fields import *\nfrom test_haystack.core.models import (\n    ManyToManyLeftSideModel,\n    ManyToManyRightSideModel,\n    MockModel,\n    MockTag,\n    OneToManyLeftSideModel,\n    OneToManyRightSideModel,\n)\n\n\nclass SearchFieldTestCase(TestCase):\n    def test_get_iterable_objects_with_none(self):\n        self.assertEqual([], SearchField.get_iterable_objects(None))\n\n    def test_get_iterable_objects_with_single_non_iterable_object(self):\n        obj = object()\n        expected = [obj]\n\n        self.assertEqual(expected, SearchField.get_iterable_objects(obj))\n\n    def test_get_iterable_objects_with_list_stays_the_same(self):\n        objects = [object(), object()]\n\n        self.assertIs(objects, SearchField.get_iterable_objects(objects))\n\n    def test_get_iterable_objects_with_django_manytomany_rel(self):\n        left_model = ManyToManyLeftSideModel.objects.create()\n        right_model_1 = ManyToManyRightSideModel.objects.create(name=\"Right side 1\")\n        right_model_2 = ManyToManyRightSideModel.objects.create()\n        left_model.related_models.add(right_model_1)\n        left_model.related_models.add(right_model_2)\n\n        result = SearchField.get_iterable_objects(left_model.related_models)\n\n        self.assertTrue(right_model_1 in result)\n        self.assertTrue(right_model_2 in result)\n\n    def test_get_iterable_objects_with_django_onetomany_rel(self):\n        left_model = OneToManyLeftSideModel.objects.create()\n        right_model_1 = OneToManyRightSideModel.objects.create(left_side=left_model)\n        right_model_2 = OneToManyRightSideModel.objects.create(left_side=left_model)\n\n        result = SearchField.get_iterable_objects(left_model.right_side)\n\n        self.assertTrue(right_model_1 in result)\n        
self.assertTrue(right_model_2 in result)\n\n    def test_resolve_attributes_lookup_with_field_that_points_to_none(self):\n        related = Mock(spec=[\"none_field\"], none_field=None)\n        obj = Mock(spec=[\"related\"], related=[related])\n\n        field = SearchField(null=False)\n\n        self.assertRaises(\n            SearchFieldError,\n            field.resolve_attributes_lookup,\n            [obj],\n            [\"related\", \"none_field\"],\n        )\n\n    def test_resolve_attributes_lookup_with_field_that_points_to_none_but_is_allowed_to_be_null(\n        self,\n    ):\n        related = Mock(spec=[\"none_field\"], none_field=None)\n        obj = Mock(spec=[\"related\"], related=[related])\n\n        field = SearchField(null=True)\n\n        self.assertEqual(\n            [None], field.resolve_attributes_lookup([obj], [\"related\", \"none_field\"])\n        )\n\n    def test_resolve_attributes_lookup_with_field_that_points_to_none_but_has_default(\n        self,\n    ):\n        related = Mock(spec=[\"none_field\"], none_field=None)\n        obj = Mock(spec=[\"related\"], related=[related])\n\n        field = SearchField(default=\"Default value\")\n\n        self.assertEqual(\n            [\"Default value\"],\n            field.resolve_attributes_lookup([obj], [\"related\", \"none_field\"]),\n        )\n\n    def test_resolve_attributes_lookup_with_deep_relationship(self):\n        related_lvl_2 = Mock(spec=[\"value\"], value=1)\n        related = Mock(spec=[\"related\"], related=[related_lvl_2, related_lvl_2])\n        obj = Mock(spec=[\"related\"], related=[related])\n\n        field = SearchField()\n\n        self.assertEqual(\n            [1, 1],\n            field.resolve_attributes_lookup([obj], [\"related\", \"related\", \"value\"]),\n        )\n\n    def test_resolve_attributes_lookup_with_deep_relationship_through_m2m(self):\n        # obj.related2m:\n        #   - related1\n        #        .deep1\n        #            .value = 1\n        
#   - related2\n        #        .deep2\n        #            .value = 2\n        #   - related3\n        #        .deep3\n        #            .value = 3\n        values = [1, 2, 3]\n        deep1, deep2, deep3 = (Mock(spec=[\"value\"], value=x) for x in values)\n        related1, related2, related3 = (\n            Mock(spec=[\"related\"], related=x) for x in (deep1, deep2, deep3)\n        )\n        m2m_rel = Mock(\n            spec=[\"__iter__\"],\n            __iter__=lambda self: iter([related1, related2, related3]),\n        )\n        obj = Mock(spec=[\"related_m2m\"], related_m2m=m2m_rel)\n        field = SearchField()\n        self.assertEqual(\n            values,\n            field.resolve_attributes_lookup([obj], [\"related_m2m\", \"related\", \"value\"]),\n        )\n\n    def test_prepare_with_null_django_onetomany_rel(self):\n        left_model = OneToManyLeftSideModel.objects.create()\n\n        field = SearchField(model_attr=\"right_side__pk\", null=True)\n        result = field.prepare(left_model)\n\n        self.assertEqual(None, result)\n\n\nclass CharFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = CharField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        author = CharField(model_attr=\"user\")\n\n        self.assertEqual(author.prepare(mock), \"daniel\")\n\n        # Do a lookup through the relation.\n        mock_tag = MockTag.objects.create(name=\"primary\")\n\n        mock = MockModel()\n        mock.tag = mock_tag\n        tag_name = CharField(model_attr=\"tag__name\")\n\n        self.assertEqual(tag_name.prepare(mock), \"primary\")\n\n        # Use the default.\n        mock = MockModel()\n        author = CharField(model_attr=\"author\", default=\"\")\n\n        self.assertEqual(author.prepare(mock), \"\")\n\n        # Simulate failed lookups.\n        mock_tag = 
MockTag.objects.create(name=\"primary\")\n\n        mock = MockModel()\n        mock.tag = mock_tag\n        tag_slug = CharField(model_attr=\"tag__slug\")\n\n        self.assertRaises(SearchFieldError, tag_slug.prepare, mock)\n\n        # Simulate failed lookups and ensure we don't get a UnicodeDecodeError\n        # in the error message.\n        mock_tag = MockTag.objects.create(name=\"básico\")\n\n        mock = MockModel()\n        mock.tag = mock_tag\n        tag_slug = CharField(model_attr=\"tag__slug\")\n\n        self.assertRaises(SearchFieldError, tag_slug.prepare, mock)\n\n        # Simulate default='foo'.\n        mock = MockModel()\n        default = CharField(default=\"foo\")\n\n        self.assertEqual(default.prepare(mock), \"foo\")\n\n        # Simulate null=True.\n        mock = MockModel()\n        empty = CharField(null=True)\n\n        self.assertEqual(empty.prepare(mock), None)\n\n        mock = MockModel()\n        mock.user = None\n        author = CharField(model_attr=\"user\", null=True)\n\n        self.assertEqual(author.prepare(mock), None)\n\n\nclass NgramFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = NgramField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n        self.assertRaises(SearchFieldError, NgramField, faceted=True)\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        author = NgramField(model_attr=\"user\")\n\n        self.assertEqual(author.prepare(mock), \"daniel\")\n\n        # Do a lookup through the relation.\n        mock_tag = MockTag.objects.create(name=\"primary\")\n\n        mock = MockModel()\n        mock.tag = mock_tag\n        tag_name = NgramField(model_attr=\"tag__name\")\n\n        self.assertEqual(tag_name.prepare(mock), \"primary\")\n\n        # Use the default.\n        mock = MockModel()\n        author = NgramField(model_attr=\"author\", default=\"\")\n\n        self.assertEqual(author.prepare(mock), 
\"\")\n\n        # Simulate failed lookups.\n        mock_tag = MockTag.objects.create(name=\"primary\")\n\n        mock = MockModel()\n        mock.tag = mock_tag\n        tag_slug = NgramField(model_attr=\"tag__slug\")\n\n        self.assertRaises(SearchFieldError, tag_slug.prepare, mock)\n\n        # Simulate default='foo'.\n        mock = MockModel()\n        default = NgramField(default=\"foo\")\n\n        self.assertEqual(default.prepare(mock), \"foo\")\n\n        # Simulate null=True.\n        mock = MockModel()\n        empty = NgramField(null=True)\n\n        self.assertEqual(empty.prepare(mock), None)\n\n        mock = MockModel()\n        mock.user = None\n        author = NgramField(model_attr=\"user\", null=True)\n\n        self.assertEqual(author.prepare(mock), None)\n\n\nclass EdgeNgramFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = EdgeNgramField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n        self.assertRaises(SearchFieldError, EdgeNgramField, faceted=True)\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        author = EdgeNgramField(model_attr=\"user\")\n\n        self.assertEqual(author.prepare(mock), \"daniel\")\n\n        # Do a lookup through the relation.\n        mock_tag = MockTag.objects.create(name=\"primary\")\n\n        mock = MockModel()\n        mock.tag = mock_tag\n        tag_name = EdgeNgramField(model_attr=\"tag__name\")\n\n        self.assertEqual(tag_name.prepare(mock), \"primary\")\n\n        # Use the default.\n        mock = MockModel()\n        author = EdgeNgramField(model_attr=\"author\", default=\"\")\n\n        self.assertEqual(author.prepare(mock), \"\")\n\n        # Simulate failed lookups.\n        mock_tag = MockTag.objects.create(name=\"primary\")\n\n        mock = MockModel()\n        mock.tag = mock_tag\n        tag_slug = EdgeNgramField(model_attr=\"tag__slug\")\n\n        self.assertRaises(SearchFieldError, 
tag_slug.prepare, mock)\n\n        # Simulate default='foo'.\n        mock = MockModel()\n        default = EdgeNgramField(default=\"foo\")\n\n        self.assertEqual(default.prepare(mock), \"foo\")\n\n        # Simulate null=True.\n        mock = MockModel()\n        empty = EdgeNgramField(null=True)\n\n        self.assertEqual(empty.prepare(mock), None)\n\n        mock = MockModel()\n        mock.user = None\n        author = EdgeNgramField(model_attr=\"user\", null=True)\n\n        self.assertEqual(author.prepare(mock), None)\n\n\nclass IntegerFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = IntegerField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.pk = 1\n        pk = IntegerField(model_attr=\"pk\")\n\n        self.assertEqual(pk.prepare(mock), 1)\n\n        # Simulate failed lookups.\n        mock_tag = MockTag.objects.create(name=\"primary\")\n\n        mock = MockModel()\n        mock.tag = mock_tag\n        tag_count = IntegerField(model_attr=\"tag__count\")\n\n        self.assertRaises(SearchFieldError, tag_count.prepare, mock)\n\n        # Simulate default=1.\n        mock = MockModel()\n        default = IntegerField(default=1)\n\n        self.assertEqual(default.prepare(mock), 1)\n\n        # Simulate null=True.\n        mock = MockModel()\n        pk_none = IntegerField(model_attr=\"pk\", null=True)\n\n        self.assertEqual(pk_none.prepare(mock), None)\n\n\nclass FloatFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = FloatField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.floaty = 12.5\n        floaty = FloatField(model_attr=\"floaty\")\n\n        self.assertEqual(floaty.prepare(mock), 12.5)\n\n        # Simulate default=1.5.\n        mock = MockModel()\n        default = FloatField(default=1.5)\n\n      
  self.assertEqual(default.prepare(mock), 1.5)\n\n        # Simulate null=True.\n        mock = MockModel()\n        floaty_none = FloatField(null=True)\n\n        self.assertEqual(floaty_none.prepare(mock), None)\n\n\nclass DecimalFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = DecimalField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.floaty = Decimal(\"12.5\")\n        floaty = DecimalField(model_attr=\"floaty\")\n\n        self.assertEqual(floaty.prepare(mock), \"12.5\")\n\n        # Simulate default=1.5.\n        mock = MockModel()\n        default = DecimalField(default=\"1.5\")\n\n        self.assertEqual(default.prepare(mock), \"1.5\")\n\n        # Simulate null=True.\n        mock = MockModel()\n        floaty_none = DecimalField(null=True)\n\n        self.assertEqual(floaty_none.prepare(mock), None)\n\n\nclass BooleanFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = BooleanField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.active = True\n        is_active = BooleanField(model_attr=\"active\")\n\n        self.assertEqual(is_active.prepare(mock), True)\n\n        # Simulate default=True.\n        mock = MockModel()\n        default = BooleanField(default=True)\n\n        self.assertEqual(default.prepare(mock), True)\n\n        # Simulate null=True.\n        mock = MockModel()\n        booly_none = BooleanField(null=True)\n\n        self.assertEqual(booly_none.prepare(mock), None)\n\n\nclass DateFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = DateField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n    def test_convert(self):\n        pub_date = DateField()\n        self.assertEqual(pub_date.convert(\"2016-02-16\"), datetime.date(2016, 2, 16))\n\n    def 
test_prepare(self):\n        mock = MockModel()\n        mock.pub_date = datetime.date(2009, 2, 13)\n        pub_date = DateField(model_attr=\"pub_date\")\n\n        self.assertEqual(pub_date.prepare(mock), datetime.date(2009, 2, 13))\n\n        # Simulate default=datetime.date(2000, 1, 1).\n        mock = MockModel()\n        default = DateField(default=datetime.date(2000, 1, 1))\n\n        self.assertEqual(default.prepare(mock), datetime.date(2000, 1, 1))\n\n    def test_prepare_from_string(self):\n        mock = MockModel()\n        mock.pub_date = datetime.date(2016, 2, 16)\n        pub_date = DateField(model_attr=\"pub_date\")\n\n        self.assertEqual(pub_date.prepare(mock), datetime.date(2016, 2, 16))\n\n\nclass DateTimeFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = DateTimeField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n    def test_convert(self):\n        pub_date = DateTimeField()\n\n        self.assertEqual(\n            pub_date.convert(\"2016-02-16T10:02:03\"),\n            datetime.datetime(2016, 2, 16, 10, 2, 3),\n        )\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.pub_date = datetime.datetime(2009, 2, 13, 10, 1, 0)\n        pub_date = DateTimeField(model_attr=\"pub_date\")\n\n        self.assertEqual(\n            pub_date.prepare(mock), datetime.datetime(2009, 2, 13, 10, 1, 0)\n        )\n\n        # Simulate default=datetime.datetime(2009, 2, 13, 10, 01, 00).\n        mock = MockModel()\n        default = DateTimeField(default=datetime.datetime(2000, 1, 1, 0, 0, 0))\n\n        self.assertEqual(default.prepare(mock), datetime.datetime(2000, 1, 1, 0, 0, 0))\n\n    def test_prepare_from_string(self):\n        mock = MockModel()\n        mock.pub_date = \"2016-02-16T10:01:02Z\"\n        pub_date = DateTimeField(model_attr=\"pub_date\")\n\n        self.assertEqual(\n            pub_date.prepare(mock), datetime.datetime(2016, 2, 16, 10, 1, 2)\n        
)\n\n\nclass MultiValueFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = MultiValueField(model_attr=\"foo\")\n        except:\n            self.fail()\n\n        self.assertRaises(SearchFieldError, MultiValueField, use_template=True)\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.sites = [\"3\", \"4\", \"5\"]\n        sites = MultiValueField(model_attr=\"sites\")\n\n        self.assertEqual(sites.prepare(mock), [\"3\", \"4\", \"5\"])\n\n        # Simulate default=[1].\n        mock = MockModel()\n        default = MultiValueField(default=[1])\n\n        self.assertEqual(default.prepare(mock), [1])\n\n        # Simulate null=True.\n        mock = MockModel()\n        multy_none = MultiValueField(null=True)\n\n        self.assertEqual(multy_none.prepare(mock), None)\n\n    def test_convert_with_single_string(self):\n        field = MultiValueField()\n\n        self.assertEqual([\"String\"], field.convert(\"String\"))\n\n    def test_convert_with_single_int(self):\n        field = MultiValueField()\n\n        self.assertEqual([1], field.convert(1))\n\n    def test_convert_with_list_of_strings(self):\n        field = MultiValueField()\n\n        self.assertEqual(\n            [\"String 1\", \"String 2\"], field.convert([\"String 1\", \"String 2\"])\n        )\n\n    def test_convert_with_list_of_ints(self):\n        field = MultiValueField()\n\n        self.assertEqual([1, 2, 3], field.convert([1, 2, 3]))\n\n\nclass CharFieldWithTemplateTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = CharField(use_template=True)\n        except:\n            self.fail()\n\n        try:\n            foo = CharField(use_template=True, template_name=\"foo.txt\")\n        except:\n            self.fail()\n\n        foo = CharField(use_template=True, template_name=\"foo.txt\")\n        self.assertEqual(foo.template_name, \"foo.txt\")\n\n        # Test the select_template usage.\n        foo = 
CharField(use_template=True, template_name=[\"bar.txt\", \"foo.txt\"])\n        self.assertEqual(foo.template_name, [\"bar.txt\", \"foo.txt\"])\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.pk = 1\n        mock.user = \"daniel\"\n        template1 = CharField(use_template=True)\n\n        self.assertRaises(SearchFieldError, template1.prepare, mock)\n\n        template2 = CharField(use_template=True)\n        template2.instance_name = \"template_x\"\n        self.assertRaises(TemplateDoesNotExist, template2.prepare, mock)\n\n        template3 = CharField(use_template=True)\n        template3.instance_name = \"template\"\n        self.assertEqual(template3.prepare(mock), \"Indexed!\\n1\")\n\n        template4 = CharField(use_template=True, template_name=\"search/indexes/foo.txt\")\n        template4.instance_name = \"template\"\n        self.assertEqual(template4.prepare(mock), \"FOO!\\n\")\n\n        template5 = CharField(\n            use_template=True, template_name=[\"foo.txt\", \"search/indexes/bar.txt\"]\n        )\n        template5.instance_name = \"template\"\n        self.assertEqual(template5.prepare(mock), \"BAR!\\n\")\n\n\n##############################################################################\n# The following tests look like they don't do much, but it's important because\n# we need to verify that the faceted variants behave like the field they\n# emulate. 
The old-broke behavior was convert everything to string.\n##############################################################################\n\n\nclass FacetFieldTestCase(TestCase):\n    def test_init(self):\n        # You shouldn't use the FacetField itself.\n        try:\n            foo = FacetField(model_attr=\"foo\")\n            self.fail()\n        except:\n            pass\n\n        try:\n            foo_exact = FacetField(facet_for=\"bar\")\n            self.fail()\n        except:\n            pass\n\n\nclass FacetCharFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = FacetCharField(model_attr=\"foo\")\n            foo_exact = FacetCharField(facet_for=\"bar\")\n        except:\n            self.fail()\n\n        self.assertEqual(foo.facet_for, None)\n        self.assertEqual(foo_exact.null, True)\n        self.assertEqual(foo_exact.facet_for, \"bar\")\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        author = FacetCharField(model_attr=\"user\")\n\n        self.assertEqual(author.prepare(mock), \"daniel\")\n\n\nclass FacetIntegerFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = FacetIntegerField(model_attr=\"foo\")\n            foo_exact = FacetIntegerField(facet_for=\"bar\")\n        except:\n            self.fail()\n\n        self.assertEqual(foo.facet_for, None)\n        self.assertEqual(foo_exact.null, True)\n        self.assertEqual(foo_exact.facet_for, \"bar\")\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        mock.view_count = 13\n        view_count = FacetIntegerField(model_attr=\"view_count\")\n\n        self.assertEqual(view_count.prepare(mock), 13)\n\n\nclass FacetFloatFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = FacetFloatField(model_attr=\"foo\")\n            foo_exact = FacetFloatField(facet_for=\"bar\")\n        except:\n            
self.fail()\n\n        self.assertEqual(foo.facet_for, None)\n        self.assertEqual(foo_exact.null, True)\n        self.assertEqual(foo_exact.facet_for, \"bar\")\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        mock.price = 25.65\n        price = FacetFloatField(model_attr=\"price\")\n\n        self.assertEqual(price.prepare(mock), 25.65)\n\n\nclass FacetBooleanFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = FacetBooleanField(model_attr=\"foo\")\n            foo_exact = FacetBooleanField(facet_for=\"bar\")\n        except:\n            self.fail()\n\n        self.assertEqual(foo.facet_for, None)\n        self.assertEqual(foo_exact.null, True)\n        self.assertEqual(foo_exact.facet_for, \"bar\")\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        mock.is_active = True\n        is_active = FacetBooleanField(model_attr=\"is_active\")\n\n        self.assertEqual(is_active.prepare(mock), True)\n\n\nclass FacetDateFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = FacetDateField(model_attr=\"foo\")\n            foo_exact = FacetDateField(facet_for=\"bar\")\n        except:\n            self.fail()\n\n        self.assertEqual(foo.facet_for, None)\n        self.assertEqual(foo_exact.null, True)\n        self.assertEqual(foo_exact.facet_for, \"bar\")\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        mock.created = datetime.date(2010, 10, 30)\n        created = FacetDateField(model_attr=\"created\")\n\n        self.assertEqual(created.prepare(mock), datetime.date(2010, 10, 30))\n\n\nclass FacetDateTimeFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = FacetDateTimeField(model_attr=\"foo\")\n            foo_exact = FacetDateTimeField(facet_for=\"bar\")\n        except:\n            self.fail()\n\n        
self.assertEqual(foo.facet_for, None)\n        self.assertEqual(foo_exact.null, True)\n        self.assertEqual(foo_exact.facet_for, \"bar\")\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        mock.created = datetime.datetime(2010, 10, 30, 3, 14, 25)\n        created = FacetDateTimeField(model_attr=\"created\")\n\n        self.assertEqual(\n            created.prepare(mock), datetime.datetime(2010, 10, 30, 3, 14, 25)\n        )\n\n\nclass FacetMultiValueFieldTestCase(TestCase):\n    def test_init(self):\n        try:\n            foo = FacetMultiValueField(model_attr=\"foo\")\n            foo_exact = FacetMultiValueField(facet_for=\"bar\")\n        except:\n            self.fail()\n\n        self.assertEqual(foo.facet_for, None)\n        self.assertEqual(foo_exact.null, True)\n        self.assertEqual(foo_exact.facet_for, \"bar\")\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.user = \"daniel\"\n        mock.sites = [1, 3, 4]\n        sites = FacetMultiValueField(model_attr=\"sites\")\n\n        self.assertEqual(sites.prepare(mock), [1, 3, 4])\n"
  },
  {
    "path": "test_haystack/test_forms.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connection_router, connections\nfrom haystack.forms import FacetedSearchForm, ModelSearchForm, SearchForm, model_choices\nfrom haystack.query import EmptySearchQuerySet, SearchQuerySet\nfrom haystack.utils.loading import UnifiedIndex\nfrom test_haystack.core.models import AnotherMockModel, MockModel\nfrom test_haystack.test_views import (\n    BasicAnotherMockModelSearchIndex,\n    BasicMockModelSearchIndex,\n)\n\n\nclass SearchFormTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.bammsi = BasicAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi, self.bammsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n        self.sqs = SearchQuerySet()\n\n    def tearDown(self):\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_unbound(self):\n        sf = SearchForm({}, searchqueryset=self.sqs)\n\n        self.assertEqual(sf.errors, {})\n        self.assertEqual(sf.is_valid(), True)\n\n        # This shouldn't blow up.\n        sqs = sf.search()\n        self.assertTrue(isinstance(sqs, EmptySearchQuerySet))\n\n\nclass ModelSearchFormTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.bammsi = BasicAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi, self.bammsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update 
the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n        self.sqs = SearchQuerySet()\n\n    def tearDown(self):\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_models_regression_1(self):\n        # Regression for issue #1.\n        msf = ModelSearchForm(\n            {\"query\": \"test\", \"models\": [\"core.mockmodel\", \"core.anothermockmodel\"]},\n            searchqueryset=self.sqs,\n        )\n\n        self.assertEqual(\n            msf.fields[\"models\"].choices,\n            [\n                (\"core.anothermockmodel\", \"Another mock models\"),\n                (\"core.mockmodel\", \"Mock models\"),\n            ],\n        )\n        self.assertEqual(msf.errors, {})\n        self.assertEqual(msf.is_valid(), True)\n\n        sqs_with_models = msf.search()\n        self.assertEqual(len(sqs_with_models.query.models), 2)\n\n    def test_model_choices(self):\n        self.assertEqual(len(model_choices()), 2)\n        self.assertEqual(\n            [option[1] for option in model_choices()],\n            [\"Another mock models\", \"Mock models\"],\n        )\n\n    def test_model_choices_unicode(self):\n        stowed_verbose_name_plural = MockModel._meta.verbose_name_plural\n        MockModel._meta.verbose_name_plural = \"☃\"\n        self.assertEqual(len(model_choices()), 2)\n        self.assertEqual(\n            [option[1] for option in model_choices()], [\"Another mock models\", \"☃\"]\n        )\n        MockModel._meta.verbose_name_plural = stowed_verbose_name_plural\n\n\nclass FacetedSearchFormTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.bammsi = BasicAnotherMockModelSearchIndex()\n 
       self.ui.build(indexes=[self.bmmsi, self.bammsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n        self.sqs = SearchQuerySet()\n\n    def tearDown(self):\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_init_with_selected_facets(self):\n        sf = FacetedSearchForm({}, searchqueryset=self.sqs)\n        self.assertEqual(sf.errors, {})\n        self.assertEqual(sf.is_valid(), True)\n        self.assertEqual(sf.selected_facets, [])\n\n        sf = FacetedSearchForm({}, selected_facets=[], searchqueryset=self.sqs)\n        self.assertEqual(sf.errors, {})\n        self.assertEqual(sf.is_valid(), True)\n        self.assertEqual(sf.selected_facets, [])\n\n        sf = FacetedSearchForm(\n            {}, selected_facets=[\"author:daniel\"], searchqueryset=self.sqs\n        )\n        self.assertEqual(sf.errors, {})\n        self.assertEqual(sf.is_valid(), True)\n        self.assertEqual(sf.selected_facets, [\"author:daniel\"])\n\n        sf = FacetedSearchForm(\n            {},\n            selected_facets=[\"author:daniel\", \"author:chris\"],\n            searchqueryset=self.sqs,\n        )\n        self.assertEqual(sf.errors, {})\n        self.assertEqual(sf.is_valid(), True)\n        self.assertEqual(sf.selected_facets, [\"author:daniel\", \"author:chris\"])\n\n    def test_search(self):\n        sf = FacetedSearchForm(\n            {\"q\": \"test\"}, selected_facets=[], searchqueryset=self.sqs\n        )\n        sqs = sf.search()\n        self.assertEqual(sqs.query.narrow_queries, set())\n\n        # Test the \"skip no-colon\" bits.\n        sf = FacetedSearchForm(\n            {\"q\": \"test\"}, selected_facets=[\"authordaniel\"], searchqueryset=self.sqs\n        )\n        sqs = sf.search()\n        
self.assertEqual(sqs.query.narrow_queries, set())\n\n        sf = FacetedSearchForm(\n            {\"q\": \"test\"}, selected_facets=[\"author:daniel\"], searchqueryset=self.sqs\n        )\n        sqs = sf.search()\n        self.assertEqual(sqs.query.narrow_queries, set(['author:\"daniel\"']))\n\n        sf = FacetedSearchForm(\n            {\"q\": \"test\"},\n            selected_facets=[\"author:daniel\", \"author:chris\"],\n            searchqueryset=self.sqs,\n        )\n        sqs = sf.search()\n        self.assertEqual(\n            sqs.query.narrow_queries, set(['author:\"daniel\"', 'author:\"chris\"'])\n        )\n"
  },
  {
    "path": "test_haystack/test_generic_views.py",
    "content": "from django.test.client import RequestFactory\nfrom django.test.testcases import TestCase\n\nfrom haystack.forms import ModelSearchForm\nfrom haystack.generic_views import SearchView\n\n\nclass GenericSearchViewsTestCase(TestCase):\n    \"\"\"Test case for the generic search views.\"\"\"\n\n    def setUp(self):\n        super().setUp()\n        self.query = \"haystack\"\n        self.request = self.get_request(url=\"/some/random/url?q={0}\".format(self.query))\n\n    def test_get_form_kwargs(self):\n        \"\"\"Test getting the search view form kwargs.\"\"\"\n        v = SearchView()\n        v.request = self.request\n\n        form_kwargs = v.get_form_kwargs()\n        self.assertEqual(form_kwargs.get(\"data\").get(\"q\"), self.query)\n        self.assertEqual(form_kwargs.get(\"initial\"), {})\n        self.assertTrue(\"searchqueryset\" in form_kwargs)\n        self.assertTrue(\"load_all\" in form_kwargs)\n\n    def test_search_view_response(self):\n        \"\"\"Test the generic SearchView response.\"\"\"\n        response = SearchView.as_view()(request=self.request)\n\n        context = response.context_data\n        self.assertEqual(context[\"query\"], self.query)\n        self.assertEqual(context.get(\"view\").__class__, SearchView)\n        self.assertEqual(context.get(\"form\").__class__, ModelSearchForm)\n        self.assertIn(\"page_obj\", context)\n        self.assertNotIn(\"page\", context)\n\n    def test_search_view_form_valid(self):\n        \"\"\"Test the generic SearchView form is valid.\"\"\"\n        v = SearchView()\n        v.kwargs = {}\n        v.request = self.request\n\n        form = v.get_form(v.get_form_class())\n        response = v.form_valid(form)\n        context = response.context_data\n\n        self.assertEqual(context[\"query\"], self.query)\n\n    def test_search_view_form_invalid(self):\n        \"\"\"Test the generic SearchView form is invalid.\"\"\"\n        v = SearchView()\n        v.kwargs = {}\n        
v.request = self.request\n\n        form = v.get_form(v.get_form_class())\n        response = v.form_invalid(form)\n        context = response.context_data\n\n        self.assertTrue(\"query\" not in context)\n\n    def get_request(self, url, method=\"get\", data=None, **kwargs):\n        \"\"\"Gets the request object for the view.\n\n        :param url: a mock url to use for the request\n        :param method: the http method to use for the request ('get', 'post',\n            etc).\n        \"\"\"\n        factory = RequestFactory()\n        factory_func = getattr(factory, method)\n\n        request = factory_func(url, data=data or {}, **kwargs)\n        return request\n"
  },
  {
    "path": "test_haystack/test_indexes.py",
    "content": "import datetime\nimport queue\nimport time\nfrom threading import Thread\n\nfrom django.test import TestCase\n\nfrom haystack import connections, indexes\nfrom haystack.exceptions import SearchFieldError\nfrom haystack.utils.loading import UnifiedIndex\nfrom test_haystack.core.models import (\n    AFifthMockModel,\n    AnotherMockModel,\n    AThirdMockModel,\n    ManyToManyLeftSideModel,\n    ManyToManyRightSideModel,\n    MockModel,\n)\n\n\nclass BadSearchIndex1(indexes.SearchIndex, indexes.Indexable):\n    author = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass BadSearchIndex2(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    content2 = indexes.CharField(document=True, use_template=True)\n    author = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass GoodMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    author = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    extra = indexes.CharField(indexed=False, use_template=True)\n\n    def get_model(self):\n        return MockModel\n\n\n# For testing inheritance...\nclass AltGoodMockSearchIndex(GoodMockSearchIndex, indexes.Indexable):\n    additional = indexes.CharField(model_attr=\"author\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass GoodCustomMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    author = indexes.CharField(model_attr=\"author\", faceted=True)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\", faceted=True)\n    extra = indexes.CharField(indexed=False, 
use_template=True)\n    hello = indexes.CharField(model_attr=\"hello\")\n\n    def prepare(self, obj):\n        super().prepare(obj)\n        self.prepared_data[\"whee\"] = \"Custom preparation.\"\n        return self.prepared_data\n\n    def prepare_author(self, obj):\n        return \"Hi, I'm %s\" % self.prepared_data[\"author\"]\n\n    def load_all_queryset(self):\n        return self.get_model()._default_manager.filter(id__gt=1)\n\n    def get_model(self):\n        return MockModel\n\n    def index_queryset(self, using=None):\n        return MockModel.objects.all()\n\n    def read_queryset(self, using=None):\n        return MockModel.objects.filter(author__in=[\"daniel1\", \"daniel3\"])\n\n    def build_queryset(self, start_date=None, end_date=None):\n        return MockModel.objects.filter(author__in=[\"daniel1\", \"daniel3\"])\n\n\nclass GoodNullableMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    author = indexes.CharField(model_attr=\"author\", null=True, faceted=True)\n\n    def get_model(self):\n        return MockModel\n\n\nclass GoodOverriddenFieldNameMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(\n        document=True, use_template=True, index_fieldname=\"more_content\"\n    )\n    author = indexes.CharField(model_attr=\"author\", index_fieldname=\"name_s\")\n    hello = indexes.CharField(model_attr=\"hello\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass GoodFacetedMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    author = indexes.CharField(model_attr=\"author\")\n    author_foo = indexes.FacetCharField(facet_for=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    pub_date_exact = indexes.FacetDateTimeField(facet_for=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n    def 
prepare_author(self, obj):\n        return \"Hi, I'm %s\" % self.prepared_data[\"author\"]\n\n    def prepare_pub_date_exact(self, obj):\n        return \"2010-10-26T01:54:32\"\n\n\nclass MROFieldsSearchIndexA(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, model_attr=\"test_a\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass MROFieldsSearchIndexB(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, model_attr=\"test_b\")\n\n    def get_model(self):\n        return MockModel\n\n\nclass MROFieldsSearchChild(MROFieldsSearchIndexA, MROFieldsSearchIndexB):\n    pass\n\n\nclass ModelWithManyToManyFieldAndAttributeLookupSearchIndex(\n    indexes.SearchIndex, indexes.Indexable\n):\n    text = indexes.CharField(document=True)\n    related_models = indexes.MultiValueField(model_attr=\"related_models__name\")\n\n    def get_model(self):\n        return ManyToManyLeftSideModel\n\n\nclass SearchIndexTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        super().setUp()\n        self.sb = connections[\"default\"].get_backend()\n        self.mi = GoodMockSearchIndex()\n        self.cmi = GoodCustomMockSearchIndex()\n        self.cnmi = GoodNullableMockSearchIndex()\n        self.gfmsi = GoodFacetedMockSearchIndex()\n\n        # Fake the unified index.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.ui.build(indexes=[self.mi])\n        connections[\"default\"]._index = self.ui\n\n        self.sample_docs = {\n            \"core.mockmodel.1\": {\n                \"text\": \"Indexed!\\n1\",\n                \"django_id\": \"1\",\n                \"django_ct\": \"core.mockmodel\",\n                \"extra\": \"Stored!\\n1\",\n                \"author\": \"daniel1\",\n                \"pub_date\": datetime.datetime(2009, 3, 17, 6, 0),\n                \"id\": \"core.mockmodel.1\",\n            },\n     
       \"core.mockmodel.2\": {\n                \"text\": \"Indexed!\\n2\",\n                \"django_id\": \"2\",\n                \"django_ct\": \"core.mockmodel\",\n                \"extra\": \"Stored!\\n2\",\n                \"author\": \"daniel2\",\n                \"pub_date\": datetime.datetime(2009, 3, 17, 7, 0),\n                \"id\": \"core.mockmodel.2\",\n            },\n            \"core.mockmodel.3\": {\n                \"text\": \"Indexed!\\n3\",\n                \"django_id\": \"3\",\n                \"django_ct\": \"core.mockmodel\",\n                \"extra\": \"Stored!\\n3\",\n                \"author\": \"daniel3\",\n                \"pub_date\": datetime.datetime(2009, 3, 17, 8, 0),\n                \"id\": \"core.mockmodel.3\",\n            },\n        }\n\n    def tearDown(self):\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_no_contentfield_present(self):\n        self.assertRaises(SearchFieldError, BadSearchIndex1)\n\n    def test_too_many_contentfields_present(self):\n        self.assertRaises(SearchFieldError, BadSearchIndex2)\n\n    def test_contentfield_present(self):\n        try:\n            mi = GoodMockSearchIndex()\n        except:\n            self.fail()\n\n    def test_proper_fields(self):\n        self.assertEqual(len(self.mi.fields), 4)\n        self.assertTrue(\"text\" in self.mi.fields)\n        self.assertTrue(isinstance(self.mi.fields[\"text\"], indexes.CharField))\n        self.assertTrue(\"author\" in self.mi.fields)\n        self.assertTrue(isinstance(self.mi.fields[\"author\"], indexes.CharField))\n        self.assertTrue(\"pub_date\" in self.mi.fields)\n        self.assertTrue(isinstance(self.mi.fields[\"pub_date\"], indexes.DateTimeField))\n        self.assertTrue(\"extra\" in self.mi.fields)\n        self.assertTrue(isinstance(self.mi.fields[\"extra\"], indexes.CharField))\n\n        self.assertEqual(len(self.cmi.fields), 7)\n        
self.assertTrue(\"text\" in self.cmi.fields)\n        self.assertTrue(isinstance(self.cmi.fields[\"text\"], indexes.CharField))\n        self.assertTrue(\"author\" in self.cmi.fields)\n        self.assertTrue(isinstance(self.cmi.fields[\"author\"], indexes.CharField))\n        self.assertTrue(\"author_exact\" in self.cmi.fields)\n        self.assertTrue(\n            isinstance(self.cmi.fields[\"author_exact\"], indexes.FacetCharField)\n        )\n        self.assertTrue(\"pub_date\" in self.cmi.fields)\n        self.assertTrue(isinstance(self.cmi.fields[\"pub_date\"], indexes.DateTimeField))\n        self.assertTrue(\"pub_date_exact\" in self.cmi.fields)\n        self.assertTrue(\n            isinstance(self.cmi.fields[\"pub_date_exact\"], indexes.FacetDateTimeField)\n        )\n        self.assertTrue(\"extra\" in self.cmi.fields)\n        self.assertTrue(isinstance(self.cmi.fields[\"extra\"], indexes.CharField))\n        self.assertTrue(\"hello\" in self.cmi.fields)\n        self.assertTrue(isinstance(self.cmi.fields[\"extra\"], indexes.CharField))\n\n    def test_index_queryset(self):\n        self.assertEqual(self.cmi.index_queryset().count(), 3)\n\n    def test_read_queryset(self):\n        self.assertEqual(self.cmi.read_queryset().count(), 2)\n\n    def test_build_queryset(self):\n        # The custom SearchIndex.build_queryset returns the same records as\n        # the read_queryset\n        self.assertEqual(self.cmi.build_queryset().count(), 2)\n\n        # Store a reference to the original method\n        old_guf = self.mi.__class__.get_updated_field\n\n        self.mi.__class__.get_updated_field = lambda self: \"pub_date\"\n\n        # With an updated field, we should get have filtered results\n        sd = datetime.datetime(2009, 3, 17, 7, 0)\n        self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 2)\n\n        ed = datetime.datetime(2009, 3, 17, 7, 59)\n        self.assertEqual(len(self.mi.build_queryset(end_date=ed)), 2)\n\n        sd = 
datetime.datetime(2009, 3, 17, 6, 0)\n        ed = datetime.datetime(2009, 3, 17, 6, 59)\n        self.assertEqual(len(self.mi.build_queryset(start_date=sd, end_date=ed)), 1)\n\n        # Remove the updated field for the next test\n        del self.mi.__class__.get_updated_field\n\n        # The default should return all 3 even if we specify a start date\n        # because there is no updated field specified\n        self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 3)\n\n        # Restore the original attribute\n        self.mi.__class__.get_updated_field = old_guf\n\n    def test_prepare(self):\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        self.assertEqual(len(self.mi.prepare(mock)), 7)\n        self.assertEqual(\n            sorted(self.mi.prepare(mock).keys()),\n            [\"author\", \"django_ct\", \"django_id\", \"extra\", \"id\", \"pub_date\", \"text\"],\n        )\n\n    def test_custom_prepare(self):\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        self.assertEqual(len(self.cmi.prepare(mock)), 11)\n        self.assertEqual(\n            sorted(self.cmi.prepare(mock).keys()),\n            [\n                \"author\",\n                \"author_exact\",\n                \"django_ct\",\n                \"django_id\",\n                \"extra\",\n                \"hello\",\n                \"id\",\n                \"pub_date\",\n                \"pub_date_exact\",\n                \"text\",\n                \"whee\",\n            ],\n        )\n\n        self.assertEqual(len(self.cmi.full_prepare(mock)), 11)\n        self.assertEqual(\n            sorted(self.cmi.full_prepare(mock).keys()),\n            [\n                \"author\",\n                \"author_exact\",\n                
\"django_ct\",\n                \"django_id\",\n                \"extra\",\n                \"hello\",\n                \"id\",\n                \"pub_date\",\n                \"pub_date_exact\",\n                \"text\",\n                \"whee\",\n            ],\n        )\n\n    def test_thread_safety(self):\n        # This is a regression. ``SearchIndex`` used to write to\n        # ``self.prepared_data``, which would leak between threads if things\n        # went too fast.\n        exceptions = []\n\n        def threaded_prepare(index_queue, index, model):\n            try:\n                index.queue = index_queue\n                prepped = index.prepare(model)\n            except Exception as e:\n                exceptions.append(e)\n                raise\n\n        class ThreadedSearchIndex(GoodMockSearchIndex):\n            def prepare_author(self, obj):\n                if obj.pk == 20:\n                    time.sleep(0.1)\n                else:\n                    time.sleep(0.5)\n\n                index_queue.put(self.prepared_data[\"author\"])\n                return self.prepared_data[\"author\"]\n\n        tmi = ThreadedSearchIndex()\n        index_queue = queue.Queue()\n        mock_1 = MockModel()\n        mock_1.pk = 20\n        mock_1.author = \"foo\"\n        mock_1.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n        mock_2 = MockModel()\n        mock_2.pk = 21\n        mock_2.author = \"daniel%s\" % mock_2.id\n        mock_2.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        th1 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_1))\n        th2 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_2))\n\n        th1.start()\n        th2.start()\n        th1.join()\n        th2.join()\n\n        mock_1_result = index_queue.get()\n        mock_2_result = index_queue.get()\n        self.assertEqual(mock_1_result, \"foo\")\n        self.assertEqual(mock_2_result, \"daniel21\")\n\n    def 
test_custom_prepare_author(self):\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        self.assertEqual(len(self.cmi.prepare(mock)), 11)\n        self.assertEqual(\n            sorted(self.cmi.prepare(mock).keys()),\n            [\n                \"author\",\n                \"author_exact\",\n                \"django_ct\",\n                \"django_id\",\n                \"extra\",\n                \"hello\",\n                \"id\",\n                \"pub_date\",\n                \"pub_date_exact\",\n                \"text\",\n                \"whee\",\n            ],\n        )\n\n        self.assertEqual(len(self.cmi.full_prepare(mock)), 11)\n        self.assertEqual(\n            sorted(self.cmi.full_prepare(mock).keys()),\n            [\n                \"author\",\n                \"author_exact\",\n                \"django_ct\",\n                \"django_id\",\n                \"extra\",\n                \"hello\",\n                \"id\",\n                \"pub_date\",\n                \"pub_date_exact\",\n                \"text\",\n                \"whee\",\n            ],\n        )\n        self.assertEqual(self.cmi.prepared_data[\"author\"], \"Hi, I'm daniel20\")\n        self.assertEqual(self.cmi.prepared_data[\"author_exact\"], \"Hi, I'm daniel20\")\n\n    def test_custom_model_attr(self):\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        self.assertEqual(len(self.cmi.prepare(mock)), 11)\n        self.assertEqual(\n            sorted(self.cmi.prepare(mock).keys()),\n            [\n                \"author\",\n                \"author_exact\",\n                \"django_ct\",\n                \"django_id\",\n                \"extra\",\n                \"hello\",\n                \"id\",\n                
\"pub_date\",\n                \"pub_date_exact\",\n                \"text\",\n                \"whee\",\n            ],\n        )\n\n        self.assertEqual(len(self.cmi.full_prepare(mock)), 11)\n        self.assertEqual(\n            sorted(self.cmi.full_prepare(mock).keys()),\n            [\n                \"author\",\n                \"author_exact\",\n                \"django_ct\",\n                \"django_id\",\n                \"extra\",\n                \"hello\",\n                \"id\",\n                \"pub_date\",\n                \"pub_date_exact\",\n                \"text\",\n                \"whee\",\n            ],\n        )\n        self.assertEqual(self.cmi.prepared_data[\"hello\"], \"World!\")\n\n    def test_custom_index_fieldname(self):\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        cofnmi = GoodOverriddenFieldNameMockSearchIndex()\n        self.assertEqual(len(cofnmi.prepare(mock)), 6)\n        self.assertEqual(\n            sorted(cofnmi.prepare(mock).keys()),\n            [\"django_ct\", \"django_id\", \"hello\", \"id\", \"more_content\", \"name_s\"],\n        )\n        self.assertEqual(cofnmi.prepared_data[\"name_s\"], \"daniel20\")\n        self.assertEqual(cofnmi.get_content_field(), \"more_content\")\n\n    def test_get_content_field(self):\n        self.assertEqual(self.mi.get_content_field(), \"text\")\n\n    def test_update(self):\n        self.sb.clear()\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 0)\n        self.mi.update()\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 3)\n        self.sb.clear()\n\n    def test_update_object(self):\n        self.sb.clear()\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 0)\n\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 
19, 0)\n\n        self.mi.update_object(mock)\n        self.assertEqual(\n            [(res.content_type(), res.pk) for res in self.sb.search(\"*\")[\"results\"]],\n            [(\"core.mockmodel\", \"20\")],\n        )\n        self.sb.clear()\n\n    def test_remove_object(self):\n        self.mi.update()\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 3)\n\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        self.mi.update_object(mock)\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 4)\n\n        self.mi.remove_object(mock)\n        self.assertEqual(\n            [(res.content_type(), res.pk) for res in self.sb.search(\"*\")[\"results\"]],\n            [(\"core.mockmodel\", \"1\"), (\"core.mockmodel\", \"2\"), (\"core.mockmodel\", \"3\")],\n        )\n\n        # Put it back so we can test passing kwargs.\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        self.mi.update_object(mock)\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 4)\n\n        self.mi.remove_object(mock, commit=False)\n        self.assertEqual(\n            [(res.content_type(), res.pk) for res in self.sb.search(\"*\")[\"results\"]],\n            [\n                (\"core.mockmodel\", \"1\"),\n                (\"core.mockmodel\", \"2\"),\n                (\"core.mockmodel\", \"3\"),\n                (\"core.mockmodel\", \"20\"),\n            ],\n        )\n\n        self.sb.clear()\n\n    def test_clear(self):\n        self.mi.update()\n        self.assertGreater(self.sb.search(\"*\")[\"hits\"], 0)\n\n        self.mi.clear()\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 0)\n\n    def test_reindex(self):\n        self.mi.reindex()\n        self.assertEqual(\n            [(res.content_type(), res.pk) for res in 
self.sb.search(\"*\")[\"results\"]],\n            [(\"core.mockmodel\", \"1\"), (\"core.mockmodel\", \"2\"), (\"core.mockmodel\", \"3\")],\n        )\n        self.sb.clear()\n\n    def test_inheritance(self):\n        try:\n            agmi = AltGoodMockSearchIndex()\n        except:\n            self.fail()\n\n        self.assertEqual(len(agmi.fields), 5)\n        self.assertTrue(\"text\" in agmi.fields)\n        self.assertTrue(isinstance(agmi.fields[\"text\"], indexes.CharField))\n        self.assertTrue(\"author\" in agmi.fields)\n        self.assertTrue(isinstance(agmi.fields[\"author\"], indexes.CharField))\n        self.assertTrue(\"pub_date\" in agmi.fields)\n        self.assertTrue(isinstance(agmi.fields[\"pub_date\"], indexes.DateTimeField))\n        self.assertTrue(\"extra\" in agmi.fields)\n        self.assertTrue(isinstance(agmi.fields[\"extra\"], indexes.CharField))\n        self.assertTrue(\"additional\" in agmi.fields)\n        self.assertTrue(isinstance(agmi.fields[\"additional\"], indexes.CharField))\n\n    def test_proper_field_resolution(self):\n        mrofsc = MROFieldsSearchChild()\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel%s\" % mock.id\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n        mock.test_a = \"This is A\"\n        mock.test_b = \"This is B\"\n\n        self.assertEqual(len(mrofsc.fields), 1)\n        prepped_data = mrofsc.prepare(mock)\n        self.assertEqual(len(prepped_data), 4)\n        self.assertEqual(prepped_data[\"text\"], \"This is A\")\n\n    def test_load_all_queryset(self):\n        self.assertEqual([obj.id for obj in self.cmi.load_all_queryset()], [2, 3])\n\n    def test_nullable(self):\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = None\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        prepared_data = self.cnmi.prepare(mock)\n        self.assertEqual(len(prepared_data), 6)\n        self.assertEqual(\n   
         sorted(prepared_data.keys()),\n            [\"author\", \"author_exact\", \"django_ct\", \"django_id\", \"id\", \"text\"],\n        )\n\n        prepared_data = self.cnmi.full_prepare(mock)\n        self.assertEqual(len(prepared_data), 4)\n        self.assertEqual(\n            sorted(prepared_data.keys()), [\"django_ct\", \"django_id\", \"id\", \"text\"]\n        )\n\n    def test_custom_facet_fields(self):\n        mock = MockModel()\n        mock.pk = 20\n        mock.author = \"daniel\"\n        mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)\n\n        prepared_data = self.gfmsi.prepare(mock)\n        self.assertEqual(len(prepared_data), 8)\n        self.assertEqual(\n            sorted(prepared_data.keys()),\n            [\n                \"author\",\n                \"author_foo\",\n                \"django_ct\",\n                \"django_id\",\n                \"id\",\n                \"pub_date\",\n                \"pub_date_exact\",\n                \"text\",\n            ],\n        )\n\n        prepared_data = self.gfmsi.full_prepare(mock)\n        self.assertEqual(len(prepared_data), 8)\n        self.assertEqual(\n            sorted(prepared_data.keys()),\n            [\n                \"author\",\n                \"author_foo\",\n                \"django_ct\",\n                \"django_id\",\n                \"id\",\n                \"pub_date\",\n                \"pub_date_exact\",\n                \"text\",\n            ],\n        )\n        self.assertEqual(prepared_data[\"author_foo\"], \"Hi, I'm daniel\")\n        self.assertEqual(prepared_data[\"pub_date_exact\"], \"2010-10-26T01:54:32\")\n\n\nclass BasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):\n    class Meta:\n        model = MockModel\n\n\nclass FieldsModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):\n    class Meta:\n        model = MockModel\n        fields = [\"author\", \"pub_date\"]\n\n\nclass 
ExcludesModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):\n    class Meta:\n        model = MockModel\n        excludes = [\"author\", \"foo\"]\n\n\nclass FieldsWithOverrideModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):\n    foo = indexes.IntegerField(model_attr=\"foo\")\n\n    class Meta:\n        model = MockModel\n        fields = [\"author\", \"foo\"]\n\n    def get_index_fieldname(self, f):\n        if f.name == \"author\":\n            return \"author_bar\"\n        else:\n            return f.name\n\n\nclass YetAnotherBasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n\n    class Meta:\n        model = AThirdMockModel\n\n\nclass PolymorphicModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n\n    author = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    average_delay = indexes.FloatField(null=True)\n\n    def get_model(self):\n        return AnotherMockModel\n\n    def prepare(self, obj):\n        self.prepared_data = super().prepare(obj)\n        if isinstance(obj, AThirdMockModel):\n            self.prepared_data[\"average_delay\"] = obj.average_delay\n        return self.prepared_data\n\n    def index_queryset(self, using=None):\n        return self.get_model().objects.all()\n\n\nclass GhettoAFifthMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n\n    def get_model(self):\n        return AFifthMockModel\n\n    def index_queryset(self, using=None):\n        # Index everything,\n        return self.get_model().objects.complete_set()\n\n    def read_queryset(self, using=None):\n        return self.get_model().objects.all()\n\n\nclass ReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    author = indexes.CharField(model_attr=\"author\", document=True)\n\n    def get_model(self):\n  
      return AFifthMockModel\n\n    def read_queryset(self, using=None):\n        return self.get_model().objects.complete_set()\n\n\nclass TextReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"author\", document=True)\n\n    def get_model(self):\n        return AFifthMockModel\n\n    def read_queryset(self, using=None):\n        return self.get_model().objects.complete_set()\n\n\nclass ModelWithManyToManyFieldModelSearchIndex(indexes.ModelSearchIndex):\n    def get_model(self):\n        return ManyToManyLeftSideModel\n\n\nclass ModelSearchIndexTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.sb = connections[\"default\"].get_backend()\n        self.bmsi = BasicModelSearchIndex()\n        self.fmsi = FieldsModelSearchIndex()\n        self.emsi = ExcludesModelSearchIndex()\n        self.fwomsi = FieldsWithOverrideModelSearchIndex()\n        self.yabmsi = YetAnotherBasicModelSearchIndex()\n        self.m2mmsi = ModelWithManyToManyFieldModelSearchIndex()\n\n    def test_basic(self):\n        self.assertEqual(len(self.bmsi.fields), 4)\n        self.assertTrue(\"foo\" in self.bmsi.fields)\n        self.assertTrue(isinstance(self.bmsi.fields[\"foo\"], indexes.CharField))\n        self.assertEqual(self.bmsi.fields[\"foo\"].null, False)\n        self.assertEqual(self.bmsi.fields[\"foo\"].index_fieldname, \"foo\")\n        self.assertTrue(\"author\" in self.bmsi.fields)\n        self.assertTrue(isinstance(self.bmsi.fields[\"author\"], indexes.CharField))\n        self.assertEqual(self.bmsi.fields[\"author\"].null, False)\n        self.assertTrue(\"pub_date\" in self.bmsi.fields)\n        self.assertTrue(isinstance(self.bmsi.fields[\"pub_date\"], indexes.DateTimeField))\n        self.assertTrue(\n            isinstance(self.bmsi.fields[\"pub_date\"].default, datetime.datetime)\n        )\n        self.assertTrue(\"text\" in self.bmsi.fields)\n        
self.assertTrue(isinstance(self.bmsi.fields[\"text\"], indexes.CharField))\n        self.assertEqual(self.bmsi.fields[\"text\"].document, True)\n        self.assertEqual(self.bmsi.fields[\"text\"].use_template, True)\n\n    def test_fields(self):\n        self.assertEqual(len(self.fmsi.fields), 3)\n        self.assertTrue(\"author\" in self.fmsi.fields)\n        self.assertTrue(isinstance(self.fmsi.fields[\"author\"], indexes.CharField))\n        self.assertTrue(\"pub_date\" in self.fmsi.fields)\n        self.assertTrue(isinstance(self.fmsi.fields[\"pub_date\"], indexes.DateTimeField))\n        self.assertTrue(\"text\" in self.fmsi.fields)\n        self.assertTrue(isinstance(self.fmsi.fields[\"text\"], indexes.CharField))\n\n    def test_excludes(self):\n        self.assertEqual(len(self.emsi.fields), 2)\n        self.assertTrue(\"pub_date\" in self.emsi.fields)\n        self.assertTrue(isinstance(self.emsi.fields[\"pub_date\"], indexes.DateTimeField))\n        self.assertTrue(\"text\" in self.emsi.fields)\n        self.assertTrue(isinstance(self.emsi.fields[\"text\"], indexes.CharField))\n        self.assertNotIn(\"related_models\", self.m2mmsi.fields)\n\n    def test_fields_with_override(self):\n        self.assertEqual(len(self.fwomsi.fields), 3)\n        self.assertTrue(\"author\" in self.fwomsi.fields)\n        self.assertTrue(isinstance(self.fwomsi.fields[\"author\"], indexes.CharField))\n        self.assertTrue(\"foo\" in self.fwomsi.fields)\n        self.assertTrue(isinstance(self.fwomsi.fields[\"foo\"], indexes.IntegerField))\n        self.assertTrue(\"text\" in self.fwomsi.fields)\n        self.assertTrue(isinstance(self.fwomsi.fields[\"text\"], indexes.CharField))\n\n    def test_overriding_field_name_with_get_index_fieldname(self):\n        self.assertTrue(self.fwomsi.fields[\"foo\"].index_fieldname, \"foo\")\n        self.assertTrue(self.fwomsi.fields[\"author\"].index_fieldname, \"author_bar\")\n\n    def test_float_integer_fields(self):\n        
self.assertEqual(len(self.yabmsi.fields), 5)\n        self.assertEqual(\n            sorted(self.yabmsi.fields.keys()),\n            [\"author\", \"average_delay\", \"pub_date\", \"text\", \"view_count\"],\n        )\n        self.assertTrue(\"author\" in self.yabmsi.fields)\n        self.assertTrue(isinstance(self.yabmsi.fields[\"author\"], indexes.CharField))\n        self.assertEqual(self.yabmsi.fields[\"author\"].null, False)\n        self.assertTrue(\"pub_date\" in self.yabmsi.fields)\n        self.assertTrue(\n            isinstance(self.yabmsi.fields[\"pub_date\"], indexes.DateTimeField)\n        )\n        self.assertTrue(\n            isinstance(self.yabmsi.fields[\"pub_date\"].default, datetime.datetime)\n        )\n        self.assertTrue(\"text\" in self.yabmsi.fields)\n        self.assertTrue(isinstance(self.yabmsi.fields[\"text\"], indexes.CharField))\n        self.assertEqual(self.yabmsi.fields[\"text\"].document, True)\n        self.assertEqual(self.yabmsi.fields[\"text\"].use_template, False)\n        self.assertTrue(\"view_count\" in self.yabmsi.fields)\n        self.assertTrue(\n            isinstance(self.yabmsi.fields[\"view_count\"], indexes.IntegerField)\n        )\n        self.assertEqual(self.yabmsi.fields[\"view_count\"].null, False)\n        self.assertEqual(self.yabmsi.fields[\"view_count\"].index_fieldname, \"view_count\")\n        self.assertTrue(\"average_delay\" in self.yabmsi.fields)\n        self.assertTrue(\n            isinstance(self.yabmsi.fields[\"average_delay\"], indexes.FloatField)\n        )\n        self.assertEqual(self.yabmsi.fields[\"average_delay\"].null, False)\n        self.assertEqual(\n            self.yabmsi.fields[\"average_delay\"].index_fieldname, \"average_delay\"\n        )\n\n\nclass ModelWithManyToManyFieldAndAttributeLookupSearchIndexTestCase(TestCase):\n    def test_full_prepare(self):\n        index = ModelWithManyToManyFieldAndAttributeLookupSearchIndex()\n\n        left_model = 
ManyToManyLeftSideModel.objects.create()\n        right_model_1 = ManyToManyRightSideModel.objects.create(name=\"Right side 1\")\n        right_model_2 = ManyToManyRightSideModel.objects.create()\n        left_model.related_models.add(right_model_1)\n        left_model.related_models.add(right_model_2)\n\n        result = index.full_prepare(left_model)\n\n        self.assertDictEqual(\n            result,\n            {\n                \"django_ct\": \"core.manytomanyleftsidemodel\",\n                \"django_id\": \"1\",\n                \"text\": None,\n                \"id\": \"core.manytomanyleftsidemodel.1\",\n                \"related_models\": [\"Right side 1\", \"Default name\"],\n            },\n        )\n\n\nclass PolymorphicModelTestCase(TestCase):\n    def test_prepare_with_polymorphic(self):\n        index = PolymorphicModelSearchIndex()\n\n        parent_model = AnotherMockModel()\n        parent_model.author = \"Paul\"\n        parent_model.pub_date = datetime.datetime(2018, 5, 23, 13, 57)\n        parent_model.save()\n\n        child_model = AThirdMockModel()\n        child_model.author = \"Paula\"\n        child_model.pub_date = datetime.datetime(2018, 5, 23, 13, 58)\n        child_model.average_delay = 0.5\n        child_model.save()\n\n        prepared_data = index.prepare(parent_model)\n        self.assertEqual(len(prepared_data), 7)\n        self.assertEqual(\n            sorted(prepared_data.keys()),\n            [\n                \"author\",\n                \"average_delay\",\n                \"django_ct\",\n                \"django_id\",\n                \"id\",\n                \"pub_date\",\n                \"text\",\n            ],\n        )\n        self.assertEqual(prepared_data[\"django_ct\"], \"core.anothermockmodel\")\n        self.assertEqual(prepared_data[\"average_delay\"], None)\n\n        prepared_data = index.prepare(child_model)\n        self.assertEqual(len(prepared_data), 7)\n        self.assertEqual(\n            
sorted(prepared_data.keys()),\n            [\n                \"author\",\n                \"average_delay\",\n                \"django_ct\",\n                \"django_id\",\n                \"id\",\n                \"pub_date\",\n                \"text\",\n            ],\n        )\n        self.assertEqual(prepared_data[\"django_ct\"], \"core.anothermockmodel\")\n        self.assertEqual(prepared_data[\"average_delay\"], 0.5)\n"
  },
  {
    "path": "test_haystack/test_inputs.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connections, inputs\n\n\nclass InputTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.query_obj = connections[\"default\"].get_query()\n\n    def test_raw_init(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {})\n        self.assertEqual(raw.post_process, False)\n\n        raw = inputs.Raw(\"hello OR there, :you\", test=\"really\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {\"test\": \"really\"})\n        self.assertEqual(raw.post_process, False)\n\n    def test_raw_prepare(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.prepare(self.query_obj), \"hello OR there, :you\")\n\n    def test_clean_init(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.query_string, \"hello OR there, :you\")\n        self.assertEqual(clean.post_process, True)\n\n    def test_clean_prepare(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.prepare(self.query_obj), \"hello OR there, :you\")\n\n    def test_exact_init(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.query_string, \"hello OR there, :you\")\n        self.assertEqual(exact.post_process, True)\n\n    def test_exact_prepare(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello OR there, :you\"')\n\n        # Incorrect, but the backend doesn't implement much of anything useful.\n        exact = inputs.Exact(\"hello OR there, :you\", clean=True)\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello OR there, :you\"')\n\n    def test_not_init(self):\n        not_it = inputs.Not(\"hello OR 
there, :you\")\n        self.assertEqual(not_it.query_string, \"hello OR there, :you\")\n        self.assertEqual(not_it.post_process, True)\n\n    def test_not_prepare(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        self.assertEqual(not_it.prepare(self.query_obj), \"NOT (hello OR there, :you)\")\n\n    def test_autoquery_init(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.query_string, 'panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.post_process, False)\n\n    def test_autoquery_prepare(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(\n            autoquery.prepare(self.query_obj), 'panic NOT don\\'t \"froody dude\"'\n        )\n\n    def test_altparser_init(self):\n        altparser = inputs.AltParser(\"dismax\")\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"\")\n        self.assertEqual(altparser.kwargs, {})\n        self.assertEqual(altparser.post_process, False)\n\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"douglas adams\")\n        self.assertEqual(altparser.kwargs, {\"mm\": 1, \"qf\": \"author\"})\n        self.assertEqual(altparser.post_process, False)\n\n    def test_altparser_prepare(self):\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        # Not supported on that backend.\n        self.assertEqual(altparser.prepare(self.query_obj), \"\")\n"
  },
  {
    "path": "test_haystack/test_loading.py",
    "content": "import unittest\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.test import TestCase, override_settings\n\nfrom haystack import indexes\nfrom haystack.exceptions import NotHandled, SearchFieldError\nfrom haystack.utils import loading\nfrom test_haystack.core.models import AnotherMockModel, MockModel\n\ntry:\n    import pysolr\nexcept ImportError:\n    pysolr = False\n\n\nclass ConnectionHandlerTestCase(TestCase):\n    def test_init(self):\n        ch = loading.ConnectionHandler({})\n        self.assertEqual(ch.connections_info, {})\n\n        ch = loading.ConnectionHandler(\n            {\n                \"default\": {\n                    \"ENGINE\": \"haystack.backends.solr_backend.SolrEngine\",\n                    \"URL\": \"http://localhost:9001/solr/test_default\",\n                }\n            }\n        )\n        self.assertEqual(\n            ch.connections_info,\n            {\n                \"default\": {\n                    \"ENGINE\": \"haystack.backends.solr_backend.SolrEngine\",\n                    \"URL\": \"http://localhost:9001/solr/test_default\",\n                }\n            },\n        )\n\n    @unittest.skipIf(pysolr is False, \"pysolr required\")\n    def test_get_item(self):\n        ch = loading.ConnectionHandler({})\n\n        try:\n            empty_engine = ch[\"default\"]\n            self.fail()\n        except ImproperlyConfigured:\n            pass\n\n        ch = loading.ConnectionHandler(\n            {\n                \"default\": {\n                    \"ENGINE\": \"haystack.backends.solr_backend.SolrEngine\",\n                    \"URL\": \"http://localhost:9001/solr/test_default\",\n                }\n            }\n        )\n        solr_engine = ch[\"default\"]\n        backend_path, memory_address = (\n            repr(solr_engine).strip(\"<>\").split(\" object at \")\n        )\n        self.assertEqual(backend_path, 
\"haystack.backends.solr_backend.SolrEngine\")\n\n        solr_engine_2 = ch[\"default\"]\n        backend_path_2, memory_address_2 = (\n            repr(solr_engine_2).strip(\"<>\").split(\" object at \")\n        )\n        self.assertEqual(backend_path_2, \"haystack.backends.solr_backend.SolrEngine\")\n        # Ensure we're loading out of the memorized connection.\n        self.assertEqual(memory_address_2, memory_address)\n\n        try:\n            empty_engine = ch[\"slave\"]\n            self.fail()\n        except ImproperlyConfigured:\n            pass\n\n    def test_get_unified_index(self):\n        ch = loading.ConnectionHandler(\n            {\"default\": {\"ENGINE\": \"haystack.backends.simple_backend.SimpleEngine\"}}\n        )\n        ui = ch[\"default\"].get_unified_index()\n        klass, address = repr(ui).strip(\"<>\").split(\" object at \")\n        self.assertEqual(str(klass), \"haystack.utils.loading.UnifiedIndex\")\n\n        ui_2 = ch[\"default\"].get_unified_index()\n        klass_2, address_2 = repr(ui_2).strip(\"<>\").split(\" object at \")\n        self.assertEqual(str(klass_2), \"haystack.utils.loading.UnifiedIndex\")\n        self.assertEqual(address_2, address)\n\n\nclass ConnectionRouterTestCase(TestCase):\n    @override_settings()\n    def test_init(self):\n        del settings.HAYSTACK_ROUTERS\n        cr = loading.ConnectionRouter()\n        self.assertEqual(\n            [str(route.__class__) for route in cr.routers],\n            [\"<class 'haystack.routers.DefaultRouter'>\"],\n        )\n\n    @override_settings(HAYSTACK_ROUTERS=[\"haystack.routers.DefaultRouter\"])\n    def test_router_override1(self):\n        cr = loading.ConnectionRouter()\n        self.assertEqual(\n            [str(route.__class__) for route in cr.routers],\n            [\"<class 'haystack.routers.DefaultRouter'>\"],\n        )\n\n    @override_settings(HAYSTACK_ROUTERS=[])\n    def test_router_override2(self):\n        cr = 
loading.ConnectionRouter()\n        self.assertEqual(\n            [str(route.__class__) for route in cr.routers],\n            [\"<class 'haystack.routers.DefaultRouter'>\"],\n        )\n\n    @override_settings(\n        HAYSTACK_ROUTERS=[\n            \"test_haystack.mocks.MockMasterSlaveRouter\",\n            \"haystack.routers.DefaultRouter\",\n        ]\n    )\n    def test_router_override3(self):\n        cr = loading.ConnectionRouter()\n        self.assertEqual(\n            [str(route.__class__) for route in cr.routers],\n            [\n                \"<class 'test_haystack.mocks.MockMasterSlaveRouter'>\",\n                \"<class 'haystack.routers.DefaultRouter'>\",\n            ],\n        )\n\n    @override_settings()\n    def test_actions1(self):\n        del settings.HAYSTACK_ROUTERS\n        cr = loading.ConnectionRouter()\n        self.assertEqual(cr.for_read(), \"default\")\n        self.assertEqual(cr.for_write(), [\"default\"])\n\n    @override_settings(\n        HAYSTACK_ROUTERS=[\n            \"test_haystack.mocks.MockMasterSlaveRouter\",\n            \"haystack.routers.DefaultRouter\",\n        ]\n    )\n    def test_actions2(self):\n        cr = loading.ConnectionRouter()\n        self.assertEqual(cr.for_read(), \"slave\")\n        self.assertEqual(cr.for_write(), [\"master\", \"default\"])\n\n    @override_settings(\n        HAYSTACK_ROUTERS=[\n            \"test_haystack.mocks.MockPassthroughRouter\",\n            \"test_haystack.mocks.MockMasterSlaveRouter\",\n            \"haystack.routers.DefaultRouter\",\n        ]\n    )\n    def test_actions3(self):\n        cr = loading.ConnectionRouter()\n        # Demonstrate pass-through\n        self.assertEqual(cr.for_read(), \"slave\")\n        self.assertEqual(cr.for_write(), [\"master\", \"default\"])\n        # Demonstrate that hinting can change routing.\n        self.assertEqual(cr.for_read(pass_through=False), \"pass\")\n        self.assertEqual(\n            
cr.for_write(pass_through=False), [\"pass\", \"master\", \"default\"]\n        )\n\n    @override_settings(\n        HAYSTACK_ROUTERS=[\n            \"test_haystack.mocks.MockMultiRouter\",\n            \"haystack.routers.DefaultRouter\",\n        ]\n    )\n    def test_actions4(self):\n        cr = loading.ConnectionRouter()\n        # Demonstrate that a router can return multiple backends in the \"for_write\" method\n        self.assertEqual(cr.for_read(), \"default\")\n        self.assertEqual(cr.for_write(), [\"multi1\", \"multi2\", \"default\"])\n\n\nclass MockNotAModel(object):\n    pass\n\n\nclass FakeSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):\n    def update_object(self, instance, **kwargs):\n        # Incorrect behavior but easy to test and all we care about is that we\n        # make it here. We rely on the `SearchIndex` tests to ensure correct\n        # behavior.\n        return True\n\n    def remove_object(self, instance, **kwargs):\n        # Incorrect behavior but easy to test and all we care about is that we\n        # make it here. 
We rely on the `SearchIndex` tests to ensure correct\n        # behavior.\n        return True\n\n    def get_model(self):\n        return MockModel\n\n\nclass InvalidSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    document = indexes.CharField(document=True)\n\n    def get_model(self):\n        return MockModel\n\n\nclass BasicMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):\n    def get_model(self):\n        return MockModel\n\n\nclass BasicAnotherMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):\n    def get_model(self):\n        return AnotherMockModel\n\n\nclass ValidSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    author = indexes.CharField(index_fieldname=\"name\")\n    title = indexes.CharField(indexed=False)\n\n    def get_model(self):\n        return MockModel\n\n\nclass AlternateValidSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    author = indexes.CharField(faceted=True)\n    title = indexes.CharField(faceted=True)\n\n    def get_model(self):\n        return AnotherMockModel\n\n\nclass ExplicitFacetSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    author = indexes.CharField(faceted=True)\n    title = indexes.CharField()\n    title_facet = indexes.FacetCharField(facet_for=\"title\")\n    bare_facet = indexes.FacetCharField()\n\n    def get_model(self):\n        return MockModel\n\n\nclass MultiValueValidSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    author = indexes.MultiValueField(stored=False)\n    title = indexes.CharField(indexed=False)\n\n    def get_model(self):\n        return MockModel\n\n\nclass UnifiedIndexTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.ui = loading.UnifiedIndex()\n        self.ui.build([])\n\n    def test_get_index(self):\n        
self.assertRaises(NotHandled, self.ui.get_index, MockModel)\n        try:\n            self.ui.get_index(MockModel)\n        except NotHandled as e:\n            self.assertTrue(MockModel.__name__ in str(e))\n\n        self.ui.build(indexes=[BasicMockModelSearchIndex()])\n        self.assertTrue(\n            isinstance(self.ui.get_index(MockModel), indexes.BasicSearchIndex)\n        )\n\n    def test_get_indexed_models(self):\n        self.assertEqual(self.ui.get_indexed_models(), [])\n\n        self.ui.build(indexes=[ValidSearchIndex()])\n        indexed_models = self.ui.get_indexed_models()\n        self.assertEqual(len(indexed_models), 1)\n        self.assertTrue(MockModel in indexed_models)\n\n    def test_get_indexes(self):\n        self.assertEqual(self.ui.get_indexes(), {})\n\n        index = ValidSearchIndex()\n        self.ui.build(indexes=[index])\n\n        results = self.ui.get_indexes()\n        self.assertEqual(len(results), 1)\n        self.assertTrue(MockModel in results)\n        self.assertEqual(results[MockModel], index)\n\n    def test_all_searchfields(self):\n        self.ui.build(indexes=[BasicMockModelSearchIndex()])\n        fields = self.ui.all_searchfields()\n        self.assertEqual(len(fields), 1)\n        self.assertTrue(\"text\" in fields)\n        self.assertTrue(isinstance(fields[\"text\"], indexes.CharField))\n        self.assertEqual(fields[\"text\"].document, True)\n        self.assertEqual(fields[\"text\"].use_template, True)\n\n        self.ui.build(\n            indexes=[BasicMockModelSearchIndex(), AlternateValidSearchIndex()]\n        )\n        fields = self.ui.all_searchfields()\n        self.assertEqual(len(fields), 5)\n        self.assertEqual(\n            sorted(fields.keys()),\n            [\"author\", \"author_exact\", \"text\", \"title\", \"title_exact\"],\n        )\n        self.assertTrue(\"text\" in fields)\n        self.assertTrue(isinstance(fields[\"text\"], indexes.CharField))\n        
self.assertEqual(fields[\"text\"].document, True)\n        self.assertEqual(fields[\"text\"].use_template, True)\n        self.assertTrue(\"title\" in fields)\n        self.assertTrue(isinstance(fields[\"title\"], indexes.CharField))\n        self.assertEqual(fields[\"title\"].document, False)\n        self.assertEqual(fields[\"title\"].use_template, False)\n        self.assertEqual(fields[\"title\"].faceted, True)\n        self.assertEqual(fields[\"title\"].indexed, True)\n        self.assertTrue(\"author\" in fields)\n        self.assertTrue(isinstance(fields[\"author\"], indexes.CharField))\n        self.assertEqual(fields[\"author\"].document, False)\n        self.assertEqual(fields[\"author\"].use_template, False)\n        self.assertEqual(fields[\"author\"].faceted, True)\n        self.assertEqual(fields[\"author\"].stored, True)\n        self.assertEqual(fields[\"author\"].index_fieldname, \"author\")\n\n        self.ui.build(\n            indexes=[AlternateValidSearchIndex(), MultiValueValidSearchIndex()]\n        )\n        fields = self.ui.all_searchfields()\n        self.assertEqual(len(fields), 5)\n        self.assertEqual(\n            sorted(fields.keys()),\n            [\"author\", \"author_exact\", \"text\", \"title\", \"title_exact\"],\n        )\n        self.assertTrue(\"text\" in fields)\n        self.assertTrue(isinstance(fields[\"text\"], indexes.CharField))\n        self.assertEqual(fields[\"text\"].document, True)\n        self.assertEqual(fields[\"text\"].use_template, False)\n        self.assertTrue(\"title\" in fields)\n        self.assertTrue(isinstance(fields[\"title\"], indexes.CharField))\n        self.assertEqual(fields[\"title\"].document, False)\n        self.assertEqual(fields[\"title\"].use_template, False)\n        self.assertEqual(fields[\"title\"].faceted, True)\n        self.assertEqual(fields[\"title\"].indexed, True)\n        self.assertTrue(\"author\" in fields)\n        self.assertTrue(isinstance(fields[\"author\"], 
indexes.MultiValueField))\n        self.assertEqual(fields[\"author\"].document, False)\n        self.assertEqual(fields[\"author\"].use_template, False)\n        self.assertEqual(fields[\"author\"].stored, True)\n        self.assertEqual(fields[\"author\"].faceted, True)\n        self.assertEqual(fields[\"author\"].index_fieldname, \"author\")\n\n        try:\n            self.ui.build(indexes=[AlternateValidSearchIndex(), InvalidSearchIndex()])\n            self.fail()\n        except SearchFieldError:\n            pass\n\n    def test_get_index_fieldname(self):\n        self.assertEqual(self.ui._fieldnames, {})\n\n        self.ui.build(indexes=[ValidSearchIndex(), BasicAnotherMockModelSearchIndex()])\n        self.ui.get_index_fieldname(\"text\")\n        self.assertEqual(\n            self.ui._fieldnames, {\"text\": \"text\", \"title\": \"title\", \"author\": \"name\"}\n        )\n        self.assertEqual(self.ui.get_index_fieldname(\"text\"), \"text\")\n        self.assertEqual(self.ui.get_index_fieldname(\"author\"), \"name\")\n        self.assertEqual(self.ui.get_index_fieldname(\"title\"), \"title\")\n\n        # Reset the internal state to test the invalid case.\n        self.ui.reset()\n        self.assertEqual(self.ui._fieldnames, {})\n\n        try:\n            self.ui.build(indexes=[ValidSearchIndex(), AlternateValidSearchIndex()])\n            self.fail()\n        except SearchFieldError:\n            pass\n\n    def test_basic_get_facet_field_name(self):\n        self.assertEqual(self.ui._facet_fieldnames, {})\n\n        self.ui.build(\n            indexes=[BasicMockModelSearchIndex(), AlternateValidSearchIndex()]\n        )\n        self.ui.get_facet_fieldname(\"text\")\n        self.assertEqual(\n            self.ui._facet_fieldnames,\n            {\"title\": \"title_exact\", \"author\": \"author_exact\"},\n        )\n        self.assertEqual(self.ui.get_index_fieldname(\"text\"), \"text\")\n        
self.assertEqual(self.ui.get_index_fieldname(\"author\"), \"author\")\n        self.assertEqual(self.ui.get_index_fieldname(\"title\"), \"title\")\n\n        self.assertEqual(self.ui.get_facet_fieldname(\"text\"), \"text\")\n        self.assertEqual(self.ui.get_facet_fieldname(\"author\"), \"author_exact\")\n        self.assertEqual(self.ui.get_facet_fieldname(\"title\"), \"title_exact\")\n\n    def test_more_advanced_get_facet_field_name(self):\n        self.assertEqual(self.ui._facet_fieldnames, {})\n\n        self.ui.build(\n            indexes=[BasicAnotherMockModelSearchIndex(), ExplicitFacetSearchIndex()]\n        )\n        self.ui.get_facet_fieldname(\"text\")\n        self.assertEqual(\n            self.ui._facet_fieldnames,\n            {\n                \"bare_facet\": \"bare_facet\",\n                \"title\": \"title_facet\",\n                \"author\": \"author_exact\",\n            },\n        )\n        self.assertEqual(self.ui.get_facet_fieldname(\"title\"), \"title_facet\")\n        self.assertEqual(self.ui.get_facet_fieldname(\"bare_facet\"), \"bare_facet\")\n"
  },
  {
    "path": "test_haystack/test_management_commands.py",
    "content": "from unittest.mock import call, patch\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.test import TestCase\n\n__all__ = [\"CoreManagementCommandsTestCase\"]\n\n\nclass CoreManagementCommandsTestCase(TestCase):\n    @patch(\"haystack.management.commands.update_index.Command.update_backend\")\n    def test_update_index_default_using(self, m):\n        \"\"\"update_index uses default index when --using is not present\"\"\"\n        call_command(\"update_index\")\n        for k in settings.HAYSTACK_CONNECTIONS:\n            self.assertTrue(call(\"core\", k) in m.call_args_list)\n\n    @patch(\"haystack.management.commands.update_index.Command.update_backend\")\n    def test_update_index_using(self, m):\n        \"\"\"update_index only applies to indexes specified with --using\"\"\"\n        call_command(\"update_index\", verbosity=0, using=[\"eng\", \"fra\"])\n        m.assert_any_call(\"core\", \"eng\")\n        m.assert_any_call(\"core\", \"fra\")\n        self.assertTrue(\n            call(\"core\", \"default\") not in m.call_args_list,\n            \"update_index should have been restricted to the index specified with --using\",\n        )\n\n    @patch(\"haystack.loading.ConnectionHandler.__getitem__\")\n    def test_clear_index_default_using(self, m):\n        \"\"\"clear_index uses all keys when --using is not present\"\"\"\n        call_command(\"clear_index\", verbosity=0, interactive=False)\n        self.assertEqual(len(settings.HAYSTACK_CONNECTIONS), m.call_count)\n        for k in settings.HAYSTACK_CONNECTIONS:\n            self.assertTrue(call(k) in m.call_args_list)\n\n    @patch(\"haystack.loading.ConnectionHandler.__getitem__\")\n    def test_clear_index_using(self, m):\n        \"\"\"clear_index only applies to indexes specified with --using\"\"\"\n\n        call_command(\"clear_index\", verbosity=0, interactive=False, using=[\"eng\"])\n        m.assert_called_with(\"eng\")\n        
self.assertTrue(\n            m.return_value.get_backend.called, \"backend.clear() should be called\"\n        )\n        self.assertTrue(\n            call(\"default\") not in m.call_args_list,\n            \"clear_index should have been restricted to the index specified with --using\",\n        )\n\n    @patch(\"haystack.loading.ConnectionHandler.__getitem__\")\n    @patch(\"haystack.management.commands.update_index.Command.update_backend\")\n    def test_rebuild_index_default_using(self, m1, m2):\n        \"\"\"rebuild_index uses default index when --using is not present\"\"\"\n\n        call_command(\"rebuild_index\", verbosity=0, interactive=False)\n        self.assertEqual(len(settings.HAYSTACK_CONNECTIONS), m2.call_count)\n        for k in settings.HAYSTACK_CONNECTIONS:\n            self.assertTrue(call(k) in m2.call_args_list)\n        m1.assert_any_call(\"core\", \"default\")\n        m1.assert_any_call(\"core\", \"whoosh\")\n\n    @patch(\"haystack.loading.ConnectionHandler.__getitem__\")\n    @patch(\"haystack.management.commands.update_index.Command.update_backend\")\n    def test_rebuild_index_using(self, m1, m2):\n        \"\"\"rebuild_index passes --using to clear_index and update_index\"\"\"\n\n        call_command(\"rebuild_index\", verbosity=0, interactive=False, using=[\"eng\"])\n        m2.assert_called_with(\"eng\")\n        m1.assert_any_call(\"core\", \"eng\")\n\n    @patch(\"haystack.management.commands.update_index.Command.handle\", return_value=\"\")\n    @patch(\"haystack.management.commands.clear_index.Command.handle\", return_value=\"\")\n    def test_rebuild_index(self, mock_handle_clear, mock_handle_update):\n        call_command(\"rebuild_index\", interactive=False)\n\n        self.assertTrue(mock_handle_clear.called)\n        self.assertTrue(mock_handle_update.called)\n\n    @patch(\"haystack.management.commands.update_index.Command.handle\")\n    @patch(\"haystack.management.commands.clear_index.Command.handle\")\n    def 
test_rebuild_index_nocommit(self, *mocks):\n        call_command(\"rebuild_index\", interactive=False, commit=False)\n\n        for m in mocks:\n            self.assertEqual(m.call_count, 1)\n\n            args, kwargs = m.call_args\n\n            self.assertIn(\"commit\", kwargs)\n            self.assertEqual(False, kwargs[\"commit\"])\n\n    @patch(\"haystack.management.commands.clear_index.Command.handle\", return_value=\"\")\n    @patch(\"haystack.management.commands.update_index.Command.handle\", return_value=\"\")\n    def test_rebuild_index_nocommit(self, update_mock, clear_mock):\n        \"\"\"\n        Confirm that command-line option parsing produces the same results as using call_command() directly,\n        mostly as a sanity check for the logic in rebuild_index which combines the option_lists for its\n        component commands.\n        \"\"\"\n        from haystack.management.commands.rebuild_index import Command\n\n        Command().run_from_argv(\n            [\"django-admin.py\", \"rebuild_index\", \"--noinput\", \"--nocommit\"]\n        )\n\n        for m in (clear_mock, update_mock):\n            self.assertEqual(m.call_count, 1)\n\n            args, kwargs = m.call_args\n\n            self.assertIn(\"commit\", kwargs)\n            self.assertEqual(False, kwargs[\"commit\"])\n\n        args, kwargs = clear_mock.call_args\n\n        self.assertIn(\"interactive\", kwargs)\n        self.assertIs(kwargs[\"interactive\"], False)\n"
  },
  {
    "path": "test_haystack/test_managers.py",
    "content": "import datetime\n\nfrom django.contrib.gis.measure import D\nfrom django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.manager import SearchIndexManager\nfrom haystack.models import SearchResult\nfrom haystack.query import (\n    EmptySearchQuerySet,\n    SearchQuerySet,\n    ValuesListSearchQuerySet,\n    ValuesSearchQuerySet,\n)\nfrom test_haystack.core.models import MockModel\n\nfrom .mocks import CharPKMockSearchBackend\nfrom .test_views import BasicAnotherMockModelSearchIndex, BasicMockModelSearchIndex\n\n\nclass CustomManager(SearchIndexManager):\n    def filter(self, *args, **kwargs):\n        return self.get_search_queryset().filter(content=\"foo1\").filter(*args, **kwargs)\n\n\nclass CustomMockModelIndexWithObjectsManager(BasicMockModelSearchIndex):\n    objects = CustomManager()\n\n\nclass CustomMockModelIndexWithAnotherManager(BasicMockModelSearchIndex):\n    another = CustomManager()\n\n\nclass ManagerTestCase(TestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        self.search_index = BasicMockModelSearchIndex\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.search_index(), MockModel.objects.all())\n        ui = connections[\"default\"].get_unified_index()\n        ui.build([BasicMockModelSearchIndex(), BasicAnotherMockModelSearchIndex()])\n\n        self.search_queryset = BasicMockModelSearchIndex.objects.all()\n\n    def test_queryset(self):\n        self.assertTrue(isinstance(self.search_queryset, SearchQuerySet))\n\n    def test_none(self):\n        self.assertTrue(\n            isinstance(self.search_index.objects.none(), EmptySearchQuerySet)\n        )\n\n    def test_filter(self):\n        sqs = self.search_index.objects.filter(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 
1)\n\n    def test_exclude(self):\n        sqs = self.search_index.objects.exclude(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 1)\n\n    def test_filter_and(self):\n        sqs = self.search_index.objects.filter_and(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(sqs.query.query_filter.connector, \"AND\")\n\n    def test_filter_or(self):\n        sqs = self.search_index.objects.filter_or(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(sqs.query.query_filter.connector, \"OR\")\n\n    def test_order_by(self):\n        sqs = self.search_index.objects.order_by(\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(\"foo\" in sqs.query.order_by)\n\n    def test_order_by_distance(self):\n        from django.contrib.gis.geos import Point\n\n        p = Point(1.23, 4.56)\n        sqs = self.search_index.objects.distance(\"location\", p).order_by(\"distance\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n\n        params = sqs.query.build_params()\n\n        self.assertIn(\"distance_point\", params)\n        self.assertDictEqual(\n            params[\"distance_point\"], {\"field\": \"location\", \"point\": p}\n        )\n        self.assertTupleEqual(params[\"distance_point\"][\"point\"].coords, (1.23, 4.56))\n\n        self.assertListEqual(params[\"sort_by\"], [\"distance\"])\n\n    def test_highlight(self):\n        sqs = self.search_index.objects.highlight()\n        self.assertEqual(sqs.query.highlight, True)\n\n    def test_boost(self):\n        sqs = self.search_index.objects.boost(\"foo\", 10)\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.boost.keys()), 1)\n\n    def test_facets(self):\n        sqs = self.search_index.objects.facet(\"foo\")\n        
self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.facets), 1)\n\n    def test_within(self):\n        from django.contrib.gis.geos import Point\n\n        # This is a meaningless query but we're just confirming that the manager updates the parameters here:\n        p1 = Point(-90, -90)\n        p2 = Point(90, 90)\n        sqs = self.search_index.objects.within(\"location\", p1, p2)\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n\n        params = sqs.query.build_params()\n\n        self.assertIn(\"within\", params)\n        self.assertDictEqual(\n            params[\"within\"], {\"field\": \"location\", \"point_1\": p1, \"point_2\": p2}\n        )\n\n    def test_dwithin(self):\n        from django.contrib.gis.geos import Point\n\n        p = Point(0, 0)\n        distance = D(mi=500)\n        sqs = self.search_index.objects.dwithin(\"location\", p, distance)\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n\n        params = sqs.query.build_params()\n\n        self.assertIn(\"dwithin\", params)\n        self.assertDictEqual(\n            params[\"dwithin\"], {\"field\": \"location\", \"point\": p, \"distance\": distance}\n        )\n\n    def test_distance(self):\n        from django.contrib.gis.geos import Point\n\n        p = Point(0, 0)\n        sqs = self.search_index.objects.distance(\"location\", p)\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n\n        params = sqs.query.build_params()\n        self.assertIn(\"distance_point\", params)\n        self.assertDictEqual(\n            params[\"distance_point\"], {\"field\": \"location\", \"point\": p}\n        )\n\n    def test_date_facets(self):\n        sqs = self.search_index.objects.date_facet(\n            \"foo\",\n            start_date=datetime.date(2008, 2, 25),\n            end_date=datetime.date(2009, 2, 25),\n            gap_by=\"month\",\n        )\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        
self.assertEqual(len(sqs.query.date_facets), 1)\n\n    def test_query_facets(self):\n        sqs = self.search_index.objects.query_facet(\"foo\", \"[bar TO *]\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_facets), 1)\n\n    def test_narrow(self):\n        sqs = self.search_index.objects.narrow(\"content:foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertSetEqual(set([\"content:foo\"]), sqs.query.narrow_queries)\n\n    def test_raw_search(self):\n        self.assertEqual(len(self.search_index.objects.raw_search(\"foo\")), 23)\n\n    def test_load_all(self):\n        # Models with character primary keys.\n        sqs = self.search_index.objects.all()\n        sqs.query.backend = CharPKMockSearchBackend(\"charpk\")\n        results = sqs.load_all().all()\n        self.assertEqual(len(results._result_cache), 0)\n\n    def test_auto_query(self):\n        sqs = self.search_index.objects.auto_query(\"test search -stuff\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter),\n            \"<SQ: AND content__content=test search -stuff>\",\n        )\n\n        # With keyword argument\n        sqs = self.search_index.objects.auto_query(\n            \"test search -stuff\", fieldname=\"title\"\n        )\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter), \"<SQ: AND title__content=test search -stuff>\"\n        )\n\n    def test_autocomplete(self):\n        # Not implemented\n        pass\n\n    def test_count(self):\n        self.assertEqual(SearchQuerySet().count(), 23)\n        self.assertEqual(self.search_index.objects.count(), 23)\n\n    def test_best_match(self):\n        self.assertTrue(\n            isinstance(self.search_index.objects.best_match(), SearchResult)\n        )\n\n    def test_latest(self):\n        
self.assertTrue(\n            isinstance(self.search_index.objects.latest(\"pub_date\"), SearchResult)\n        )\n\n    def test_more_like_this(self):\n        mock = MockModel()\n        mock.id = 1\n\n        self.assertEqual(len(self.search_index.objects.more_like_this(mock)), 23)\n\n    def test_facet_counts(self):\n        self.assertEqual(self.search_index.objects.facet_counts(), {})\n\n    def spelling_suggestion(self):\n        # Test the case where spelling support is disabled.\n        sqs = self.search_index.objects.filter(content=\"Indx\")\n        self.assertEqual(sqs.spelling_suggestion(), None)\n        self.assertEqual(sqs.spelling_suggestion(preferred_query=None), None)\n\n    def test_values(self):\n        sqs = self.search_index.objects.auto_query(\"test\").values(\"id\")\n        self.assert_(isinstance(sqs, ValuesSearchQuerySet))\n\n    def test_valueslist(self):\n        sqs = self.search_index.objects.auto_query(\"test\").values_list(\"id\")\n        self.assert_(isinstance(sqs, ValuesListSearchQuerySet))\n\n\nclass CustomManagerTestCase(TestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        self.search_index_1 = CustomMockModelIndexWithObjectsManager\n        self.search_index_2 = CustomMockModelIndexWithAnotherManager\n\n    def test_filter_object_manager(self):\n        sqs = self.search_index_1.objects.filter(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n\n    def test_filter_another_manager(self):\n        sqs = self.search_index_2.another.filter(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n"
  },
  {
    "path": "test_haystack/test_models.py",
    "content": "import logging as std_logging\nimport pickle\n\nfrom django.test import TestCase\n\nfrom haystack import connections\nfrom haystack.models import SearchResult\nfrom haystack.utils import log as logging\nfrom haystack.utils.loading import UnifiedIndex\nfrom test_haystack.core.models import MockModel\n\nfrom .mocks import MockSearchResult\nfrom .test_indexes import ReadQuerySetTestSearchIndex\n\n\nclass CaptureHandler(std_logging.Handler):\n    logs_seen = []\n\n    def emit(self, record):\n        CaptureHandler.logs_seen.append(record)\n\n\nclass SearchResultTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        super().setUp()\n        cap = CaptureHandler()\n        logging.getLogger(\"haystack\").addHandler(cap)\n\n        self.no_data = {}\n        self.extra_data = {\"stored\": \"I am stored data. How fun.\"}\n        self.no_overwrite_data = {\n            \"django_id\": 2,\n            \"django_ct\": \"haystack.anothermockmodel\",\n            \"stored\": \"I am stored data. 
How fun.\",\n        }\n\n        # The str(1) bit might seem unnecessary but it avoids test_unicode needing to handle\n        # the differences between repr() output on Python 2 and 3 for a unicode literal:\n        self.no_data_sr = MockSearchResult(\"haystack\", \"mockmodel\", str(1), 2)\n        self.extra_data_sr = MockSearchResult(\n            \"haystack\", \"mockmodel\", str(1), 3, **self.extra_data\n        )\n        self.no_overwrite_data_sr = MockSearchResult(\n            \"haystack\", \"mockmodel\", str(1), 4, **self.no_overwrite_data\n        )\n\n    def test_init(self):\n        self.assertEqual(self.no_data_sr.app_label, \"haystack\")\n        self.assertEqual(self.no_data_sr.model_name, \"mockmodel\")\n        self.assertEqual(self.no_data_sr.model, MockModel)\n        self.assertEqual(self.no_data_sr.verbose_name, \"Mock model\")\n        self.assertEqual(self.no_data_sr.verbose_name_plural, \"Mock models\")\n        self.assertEqual(self.no_data_sr.pk, \"1\")\n        self.assertEqual(self.no_data_sr.score, 2)\n        self.assertEqual(self.no_data_sr.stored, None)\n\n        self.assertEqual(self.extra_data_sr.app_label, \"haystack\")\n        self.assertEqual(self.extra_data_sr.model_name, \"mockmodel\")\n        self.assertEqual(self.extra_data_sr.model, MockModel)\n        self.assertEqual(self.extra_data_sr.verbose_name, \"Mock model\")\n        self.assertEqual(self.extra_data_sr.verbose_name_plural, \"Mock models\")\n        self.assertEqual(self.extra_data_sr.pk, \"1\")\n        self.assertEqual(self.extra_data_sr.score, 3)\n        self.assertEqual(self.extra_data_sr.stored, \"I am stored data. 
How fun.\")\n\n        self.assertEqual(self.no_overwrite_data_sr.app_label, \"haystack\")\n        self.assertEqual(self.no_overwrite_data_sr.model_name, \"mockmodel\")\n        self.assertEqual(self.no_overwrite_data_sr.model, MockModel)\n        self.assertEqual(self.no_overwrite_data_sr.verbose_name, \"Mock model\")\n        self.assertEqual(self.no_overwrite_data_sr.verbose_name_plural, \"Mock models\")\n        self.assertEqual(self.no_overwrite_data_sr.pk, \"1\")\n        self.assertEqual(self.no_overwrite_data_sr.score, 4)\n        self.assertEqual(self.no_overwrite_data_sr.stored, \"I am stored data. How fun.\")\n\n    def test_get_additional_fields(self):\n        self.assertEqual(self.no_data_sr.get_additional_fields(), {})\n        self.assertEqual(\n            self.extra_data_sr.get_additional_fields(),\n            {\"stored\": \"I am stored data. How fun.\"},\n        )\n        self.assertEqual(\n            self.no_overwrite_data_sr.get_additional_fields(),\n            {\n                \"django_ct\": \"haystack.anothermockmodel\",\n                \"django_id\": 2,\n                \"stored\": \"I am stored data. 
How fun.\",\n            },\n        )\n\n    def test_unicode(self):\n        self.assertEqual(\n            self.no_data_sr.__str__(), \"<SearchResult: haystack.mockmodel (pk='1')>\"\n        )\n        self.assertEqual(\n            self.extra_data_sr.__str__(),\n            \"<SearchResult: haystack.mockmodel (pk='1')>\",\n        )\n        self.assertEqual(\n            self.no_overwrite_data_sr.__str__(),\n            \"<SearchResult: haystack.mockmodel (pk='1')>\",\n        )\n\n    def test_content_type(self):\n        self.assertEqual(self.no_data_sr.content_type(), \"core.mockmodel\")\n        self.assertEqual(self.extra_data_sr.content_type(), \"core.mockmodel\")\n        self.assertEqual(self.no_overwrite_data_sr.content_type(), \"core.mockmodel\")\n\n    def test_stored_fields(self):\n        # Stow.\n        old_unified_index = connections[\"default\"]._index\n        ui = UnifiedIndex()\n        ui.build(indexes=[])\n        connections[\"default\"]._index = ui\n\n        # Without registering, we should receive an empty dict.\n        self.assertEqual(self.no_data_sr.get_stored_fields(), {})\n        self.assertEqual(self.extra_data_sr.get_stored_fields(), {})\n        self.assertEqual(self.no_overwrite_data_sr.get_stored_fields(), {})\n\n        from haystack import indexes\n\n        class TestSearchIndex(indexes.SearchIndex, indexes.Indexable):\n            stored = indexes.CharField(model_attr=\"author\", document=True)\n\n            def get_model(self):\n                return MockModel\n\n        # Include the index & try again.\n        ui.document_field = \"stored\"\n        ui.build(indexes=[TestSearchIndex()])\n\n        self.assertEqual(self.no_data_sr.get_stored_fields(), {\"stored\": None})\n        self.assertEqual(\n            self.extra_data_sr.get_stored_fields(),\n            {\"stored\": \"I am stored data. 
How fun.\"},\n        )\n        self.assertEqual(\n            self.no_overwrite_data_sr.get_stored_fields(),\n            {\"stored\": \"I am stored data. How fun.\"},\n        )\n\n        # Restore.\n        connections[\"default\"]._index = old_unified_index\n\n    def test_missing_object(self):\n        awol1 = SearchResult(\"core\", \"mockmodel\", \"1000000\", 2)\n        self.assertEqual(awol1.app_label, \"core\")\n        self.assertEqual(awol1.model_name, \"mockmodel\")\n        self.assertEqual(awol1.pk, \"1000000\")\n        self.assertEqual(awol1.score, 2)\n\n        awol2 = SearchResult(\"core\", \"yetanothermockmodel\", \"1000000\", 2)\n        self.assertEqual(awol2.app_label, \"core\")\n        self.assertEqual(awol2.model_name, \"yetanothermockmodel\")\n        self.assertEqual(awol2.pk, \"1000000\")\n        self.assertEqual(awol2.score, 2)\n\n        # Failed lookups should fail gracefully.\n        CaptureHandler.logs_seen = []\n        self.assertEqual(awol1.model, MockModel)\n        self.assertEqual(awol1.object, None)\n        self.assertEqual(awol1.verbose_name, \"Mock model\")\n        self.assertEqual(awol1.verbose_name_plural, \"Mock models\")\n        self.assertEqual(awol1.stored, None)\n        self.assertEqual(len(CaptureHandler.logs_seen), 4)\n\n        CaptureHandler.logs_seen = []\n        self.assertEqual(awol2.model, None)\n        self.assertEqual(awol2.object, None)\n        self.assertEqual(awol2.verbose_name, \"\")\n        self.assertEqual(awol2.verbose_name_plural, \"\")\n        self.assertEqual(awol2.stored, None)\n        self.assertEqual(len(CaptureHandler.logs_seen), 12)\n\n    def test_read_queryset(self):\n        # The model is flagged deleted so not returned by the default manager.\n        deleted1 = SearchResult(\"core\", \"afifthmockmodel\", 2, 2)\n        self.assertEqual(deleted1.object, None)\n\n        # Stow.\n        old_unified_index = connections[\"default\"]._index\n        ui = UnifiedIndex()\n       
 ui.document_field = \"author\"\n        ui.build(indexes=[ReadQuerySetTestSearchIndex()])\n        connections[\"default\"]._index = ui\n\n        # The soft delete manager returns the object.\n        deleted2 = SearchResult(\"core\", \"afifthmockmodel\", 2, 2)\n        self.assertNotEqual(deleted2.object, None)\n        self.assertEqual(deleted2.object.author, \"sam2\")\n\n        # Restore.\n        connections[\"default\"]._index = old_unified_index\n\n    def test_pickling(self):\n        pickle_me_1 = SearchResult(\"core\", \"mockmodel\", \"1000000\", 2)\n        picklicious = pickle.dumps(pickle_me_1)\n\n        pickle_me_2 = pickle.loads(picklicious)\n        self.assertEqual(pickle_me_1.app_label, pickle_me_2.app_label)\n        self.assertEqual(pickle_me_1.model_name, pickle_me_2.model_name)\n        self.assertEqual(pickle_me_1.pk, pickle_me_2.pk)\n        self.assertEqual(pickle_me_1.score, pickle_me_2.score)\n"
  },
  {
    "path": "test_haystack/test_query.py",
    "content": "import datetime\nimport pickle\n\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom haystack import connections, indexes, reset_search_queries\nfrom haystack.backends import SQ, BaseSearchQuery\nfrom haystack.exceptions import FacetingError\nfrom haystack.models import SearchResult\nfrom haystack.query import (\n    EmptySearchQuerySet,\n    SearchQuerySet,\n    ValuesListSearchQuerySet,\n    ValuesSearchQuerySet,\n)\nfrom haystack.utils.loading import UnifiedIndex\nfrom test_haystack.core.models import (\n    AnotherMockModel,\n    CharPKMockModel,\n    MockModel,\n    UUIDMockModel,\n)\n\nfrom .mocks import (\n    MOCK_SEARCH_RESULTS,\n    CharPKMockSearchBackend,\n    MockSearchBackend,\n    MockSearchQuery,\n    ReadQuerySetMockSearchBackend,\n    UUIDMockSearchBackend,\n)\nfrom .test_indexes import (\n    GhettoAFifthMockModelSearchIndex,\n    TextReadQuerySetTestSearchIndex,\n)\nfrom .test_views import BasicAnotherMockModelSearchIndex, BasicMockModelSearchIndex\n\n\nclass SQTestCase(TestCase):\n    def test_split_expression(self):\n        sq = SQ(foo=\"bar\")\n\n        self.assertEqual(sq.split_expression(\"foo\"), (\"foo\", \"content\"))\n        self.assertEqual(sq.split_expression(\"foo__exact\"), (\"foo\", \"exact\"))\n        self.assertEqual(sq.split_expression(\"foo__content\"), (\"foo\", \"content\"))\n        self.assertEqual(sq.split_expression(\"foo__contains\"), (\"foo\", \"contains\"))\n        self.assertEqual(sq.split_expression(\"foo__lt\"), (\"foo\", \"lt\"))\n        self.assertEqual(sq.split_expression(\"foo__lte\"), (\"foo\", \"lte\"))\n        self.assertEqual(sq.split_expression(\"foo__gt\"), (\"foo\", \"gt\"))\n        self.assertEqual(sq.split_expression(\"foo__gte\"), (\"foo\", \"gte\"))\n        self.assertEqual(sq.split_expression(\"foo__in\"), (\"foo\", \"in\"))\n        self.assertEqual(sq.split_expression(\"foo__startswith\"), (\"foo\", \"startswith\"))\n        
self.assertEqual(sq.split_expression(\"foo__endswith\"), (\"foo\", \"endswith\"))\n        self.assertEqual(sq.split_expression(\"foo__range\"), (\"foo\", \"range\"))\n        self.assertEqual(sq.split_expression(\"foo__fuzzy\"), (\"foo\", \"fuzzy\"))\n\n        # Unrecognized filter. Fall back to exact.\n        self.assertEqual(sq.split_expression(\"foo__moof\"), (\"foo\", \"content\"))\n\n    def test_repr(self):\n        self.assertEqual(repr(SQ(foo=\"bar\")), \"<SQ: AND foo__content=bar>\")\n        self.assertEqual(repr(SQ(foo=1)), \"<SQ: AND foo__content=1>\")\n        self.assertEqual(\n            repr(SQ(foo=datetime.datetime(2009, 5, 12, 23, 17))),\n            \"<SQ: AND foo__content=2009-05-12 23:17:00>\",\n        )\n\n    def test_simple_nesting(self):\n        sq1 = SQ(foo=\"bar\")\n        sq2 = SQ(foo=\"bar\")\n        bigger_sq = SQ(sq1 & sq2)\n        self.assertEqual(\n            repr(bigger_sq), \"<SQ: AND (foo__content=bar AND foo__content=bar)>\"\n        )\n\n        another_bigger_sq = SQ(sq1 | sq2)\n        self.assertEqual(\n            repr(another_bigger_sq), \"<SQ: AND (foo__content=bar OR foo__content=bar)>\"\n        )\n\n        one_more_bigger_sq = SQ(sq1 & ~sq2)\n        self.assertEqual(\n            repr(one_more_bigger_sq),\n            \"<SQ: AND (foo__content=bar AND NOT (foo__content=bar))>\",\n        )\n\n        mega_sq = SQ(bigger_sq & SQ(another_bigger_sq | ~one_more_bigger_sq))\n        self.assertEqual(\n            repr(mega_sq),\n            \"<SQ: AND ((foo__content=bar AND foo__content=bar) AND ((foo__content=bar OR foo__content=bar) OR NOT ((foo__content=bar AND NOT (foo__content=bar)))))>\",\n        )\n\n\nclass BaseSearchQueryTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n        self.bsq = BaseSearchQuery()\n\n    def test_get_count(self):\n        self.bsq.add_filter(SQ(foo=\"bar\"))\n        
self.assertRaises(NotImplementedError, self.bsq.get_count)\n\n    def test_build_query(self):\n        self.bsq.add_filter(SQ(foo=\"bar\"))\n        self.assertRaises(NotImplementedError, self.bsq.build_query)\n\n    def test_add_filter(self):\n        self.assertEqual(len(self.bsq.query_filter), 0)\n\n        self.bsq.add_filter(SQ(foo=\"bar\"))\n        self.assertEqual(len(self.bsq.query_filter), 1)\n\n        self.bsq.add_filter(SQ(foo__lt=\"10\"))\n\n        self.bsq.add_filter(~SQ(claris=\"moof\"))\n\n        self.bsq.add_filter(SQ(claris=\"moof\"), use_or=True)\n\n        self.assertEqual(\n            repr(self.bsq.query_filter),\n            \"<SQ: OR ((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof)>\",\n        )\n\n        self.bsq.add_filter(SQ(claris=\"moof\"))\n\n        self.assertEqual(\n            repr(self.bsq.query_filter),\n            \"<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof)>\",\n        )\n\n        self.bsq.add_filter(SQ(claris=\"wtf mate\"))\n\n        self.assertEqual(\n            repr(self.bsq.query_filter),\n            \"<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof AND claris__content=wtf mate)>\",\n        )\n\n    def test_add_order_by(self):\n        self.assertEqual(len(self.bsq.order_by), 0)\n\n        self.bsq.add_order_by(\"foo\")\n        self.assertEqual(len(self.bsq.order_by), 1)\n\n    def test_clear_order_by(self):\n        self.bsq.add_order_by(\"foo\")\n        self.assertEqual(len(self.bsq.order_by), 1)\n\n        self.bsq.clear_order_by()\n        self.assertEqual(len(self.bsq.order_by), 0)\n\n    def test_add_model(self):\n        self.assertEqual(len(self.bsq.models), 0)\n        self.assertRaises(AttributeError, self.bsq.add_model, object)\n        self.assertEqual(len(self.bsq.models), 0)\n\n        
self.bsq.add_model(MockModel)\n        self.assertEqual(len(self.bsq.models), 1)\n\n        self.bsq.add_model(AnotherMockModel)\n        self.assertEqual(len(self.bsq.models), 2)\n\n    def test_set_limits(self):\n        self.assertEqual(self.bsq.start_offset, 0)\n        self.assertEqual(self.bsq.end_offset, None)\n\n        self.bsq.set_limits(10, 50)\n        self.assertEqual(self.bsq.start_offset, 10)\n        self.assertEqual(self.bsq.end_offset, 50)\n\n    def test_clear_limits(self):\n        self.bsq.set_limits(10, 50)\n        self.assertEqual(self.bsq.start_offset, 10)\n        self.assertEqual(self.bsq.end_offset, 50)\n\n        self.bsq.clear_limits()\n        self.assertEqual(self.bsq.start_offset, 0)\n        self.assertEqual(self.bsq.end_offset, None)\n\n    def test_add_boost(self):\n        self.assertEqual(self.bsq.boost, {})\n\n        self.bsq.add_boost(\"foo\", 10)\n        self.assertEqual(self.bsq.boost, {\"foo\": 10})\n\n    def test_add_highlight(self):\n        self.assertEqual(self.bsq.highlight, False)\n\n        self.bsq.add_highlight()\n        self.assertEqual(self.bsq.highlight, True)\n\n    def test_more_like_this(self):\n        mock = MockModel()\n        mock.id = 1\n        msq = MockSearchQuery()\n        msq.backend = MockSearchBackend(\"mlt\")\n        ui = connections[\"default\"].get_unified_index()\n        bmmsi = BasicMockModelSearchIndex()\n        ui.build(indexes=[bmmsi])\n        bmmsi.update()\n        msq.more_like_this(mock)\n\n        self.assertEqual(msq.get_count(), 23)\n        self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)\n\n    def test_add_field_facet(self):\n        self.bsq.add_field_facet(\"foo\")\n        self.assertEqual(self.bsq.facets, {\"foo\": {}})\n\n        self.bsq.add_field_facet(\"bar\")\n        self.assertEqual(self.bsq.facets, {\"foo\": {}, \"bar\": {}})\n\n    def test_add_date_facet(self):\n        self.bsq.add_date_facet(\n            \"foo\",\n            
start_date=datetime.date(2009, 2, 25),\n            end_date=datetime.date(2009, 3, 25),\n            gap_by=\"day\",\n        )\n        self.assertEqual(\n            self.bsq.date_facets,\n            {\n                \"foo\": {\n                    \"gap_by\": \"day\",\n                    \"start_date\": datetime.date(2009, 2, 25),\n                    \"end_date\": datetime.date(2009, 3, 25),\n                    \"gap_amount\": 1,\n                }\n            },\n        )\n\n        self.bsq.add_date_facet(\n            \"bar\",\n            start_date=datetime.date(2008, 1, 1),\n            end_date=datetime.date(2009, 12, 1),\n            gap_by=\"month\",\n        )\n        self.assertEqual(\n            self.bsq.date_facets,\n            {\n                \"foo\": {\n                    \"gap_by\": \"day\",\n                    \"start_date\": datetime.date(2009, 2, 25),\n                    \"end_date\": datetime.date(2009, 3, 25),\n                    \"gap_amount\": 1,\n                },\n                \"bar\": {\n                    \"gap_by\": \"month\",\n                    \"start_date\": datetime.date(2008, 1, 1),\n                    \"end_date\": datetime.date(2009, 12, 1),\n                    \"gap_amount\": 1,\n                },\n            },\n        )\n\n    def test_add_query_facet(self):\n        self.bsq.add_query_facet(\"foo\", \"bar\")\n        self.assertEqual(self.bsq.query_facets, [(\"foo\", \"bar\")])\n\n        self.bsq.add_query_facet(\"moof\", \"baz\")\n        self.assertEqual(self.bsq.query_facets, [(\"foo\", \"bar\"), (\"moof\", \"baz\")])\n\n        self.bsq.add_query_facet(\"foo\", \"baz\")\n        self.assertEqual(\n            self.bsq.query_facets, [(\"foo\", \"bar\"), (\"moof\", \"baz\"), (\"foo\", \"baz\")]\n        )\n\n    def test_add_stats(self):\n        self.bsq.add_stats_query(\"foo\", [\"bar\"])\n        self.assertEqual(self.bsq.stats, {\"foo\": [\"bar\"]})\n\n        
self.bsq.add_stats_query(\"moof\", [\"bar\", \"baz\"])\n        self.assertEqual(self.bsq.stats, {\"foo\": [\"bar\"], \"moof\": [\"bar\", \"baz\"]})\n\n    def test_add_narrow_query(self):\n        self.bsq.add_narrow_query(\"foo:bar\")\n        self.assertEqual(self.bsq.narrow_queries, set([\"foo:bar\"]))\n\n        self.bsq.add_narrow_query(\"moof:baz\")\n        self.assertEqual(self.bsq.narrow_queries, set([\"foo:bar\", \"moof:baz\"]))\n\n    def test_set_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        self.assertTrue(issubclass(self.bsq.result_class, SearchResult))\n\n        # Custom class.\n        class IttyBittyResult(object):\n            pass\n\n        self.bsq.set_result_class(IttyBittyResult)\n        self.assertTrue(issubclass(self.bsq.result_class, IttyBittyResult))\n\n        # Reset to default.\n        self.bsq.set_result_class(None)\n        self.assertTrue(issubclass(self.bsq.result_class, SearchResult))\n\n    def test_run(self):\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.bammsi = BasicAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi, self.bammsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n        msq = connections[\"default\"].get_query()\n        self.assertEqual(len(msq.get_results()), 23)\n        self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)\n\n        # Restore.\n        connections[\"default\"]._index = self.old_unified_index\n\n    def test_clone(self):\n        self.bsq.add_filter(SQ(foo=\"bar\"))\n        self.bsq.add_filter(SQ(foo__lt=\"10\"))\n        self.bsq.add_filter(~SQ(claris=\"moof\"))\n        
self.bsq.add_filter(SQ(claris=\"moof\"), use_or=True)\n        self.bsq.add_order_by(\"foo\")\n        self.bsq.add_model(MockModel)\n        self.bsq.add_boost(\"foo\", 2)\n        self.bsq.add_highlight()\n        self.bsq.add_field_facet(\"foo\")\n        self.bsq.add_date_facet(\n            \"foo\",\n            start_date=datetime.date(2009, 1, 1),\n            end_date=datetime.date(2009, 1, 31),\n            gap_by=\"day\",\n        )\n        self.bsq.add_query_facet(\"foo\", \"bar\")\n        self.bsq.add_stats_query(\"foo\", \"bar\")\n        self.bsq.add_narrow_query(\"foo:bar\")\n\n        clone = self.bsq._clone()\n        self.assertTrue(isinstance(clone, BaseSearchQuery))\n        self.assertEqual(len(clone.query_filter), 2)\n        self.assertEqual(len(clone.order_by), 1)\n        self.assertEqual(len(clone.models), 1)\n        self.assertEqual(len(clone.boost), 1)\n        self.assertEqual(clone.highlight, True)\n        self.assertEqual(len(clone.facets), 1)\n        self.assertEqual(len(clone.date_facets), 1)\n        self.assertEqual(len(clone.query_facets), 1)\n        self.assertEqual(len(clone.narrow_queries), 1)\n        self.assertEqual(clone.start_offset, self.bsq.start_offset)\n        self.assertEqual(clone.end_offset, self.bsq.end_offset)\n        self.assertEqual(clone.backend.__class__, self.bsq.backend.__class__)\n\n    def test_log_query(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"default\"].queries), 0)\n\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        self.bmmsi.update()\n\n        with self.settings(DEBUG=False):\n            msq = 
connections[\"default\"].get_query()\n            self.assertEqual(len(msq.get_results()), 23)\n            self.assertEqual(len(connections[\"default\"].queries), 0)\n\n        with self.settings(DEBUG=True):\n            # Redefine it to clear out the cached results.\n            msq2 = connections[\"default\"].get_query()\n            self.assertEqual(len(msq2.get_results()), 23)\n            self.assertEqual(len(connections[\"default\"].queries), 1)\n            self.assertEqual(connections[\"default\"].queries[0][\"query_string\"], \"\")\n\n            msq3 = connections[\"default\"].get_query()\n            msq3.add_filter(SQ(foo=\"bar\"))\n            len(msq3.get_results())\n            self.assertEqual(len(connections[\"default\"].queries), 2)\n            self.assertEqual(connections[\"default\"].queries[0][\"query_string\"], \"\")\n            self.assertEqual(connections[\"default\"].queries[1][\"query_string\"], \"\")\n\n        # Restore.\n        connections[\"default\"]._index = self.old_unified_index\n\n\nclass CharPKMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, model_attr=\"key\")\n\n    def get_model(self):\n        return CharPKMockModel\n\n\nclass SimpleMockUUIDModelIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, model_attr=\"characteristics\")\n\n    def get_model(self):\n        return UUIDMockModel\n\n\n@override_settings(DEBUG=True)\nclass SearchQuerySetTestCase(TestCase):\n    fixtures = [\"base_data.json\", \"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.cpkmmsi = CharPKMockModelSearchIndex()\n        self.uuidmmsi = SimpleMockUUIDModelIndex()\n        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi, self.uuidmmsi])\n        
connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n        self.msqs = SearchQuerySet()\n\n        # Stow.\n        reset_search_queries()\n\n    def tearDown(self):\n        # Restore.\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_len(self):\n        self.assertEqual(len(self.msqs), 23)\n\n    def test_repr(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"default\"].queries), 0)\n        self.assertRegexpMatches(\n            repr(self.msqs),\n            r\"^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object\"\n            r\" at 0x[0-9A-Fa-f]+>, using=None>$\",\n        )\n\n    def test_iter(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"default\"].queries), 0)\n        msqs = self.msqs.all()\n        results = [int(res.pk) for res in iter(msqs)]\n        self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])\n        self.assertEqual(len(connections[\"default\"].queries), 3)\n\n    def test_slice(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"default\"].queries), 0)\n        results = self.msqs.all()\n        self.assertEqual(\n            [int(res.pk) for res in results[1:11]],\n            [res.pk for res in MOCK_SEARCH_RESULTS[1:11]],\n        )\n        self.assertEqual(len(connections[\"default\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"default\"].queries), 0)\n        results = self.msqs.all()\n        self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)\n        self.assertEqual(len(connections[\"default\"].queries), 1)\n\n    def test_manual_iter(self):\n        results = self.msqs.all()\n\n        
reset_search_queries()\n        self.assertEqual(len(connections[\"default\"].queries), 0)\n\n        check = [result.pk for result in results._manual_iter()]\n        self.assertEqual(\n            check,\n            [\n                \"1\",\n                \"2\",\n                \"3\",\n                \"4\",\n                \"5\",\n                \"6\",\n                \"7\",\n                \"8\",\n                \"9\",\n                \"10\",\n                \"11\",\n                \"12\",\n                \"13\",\n                \"14\",\n                \"15\",\n                \"16\",\n                \"17\",\n                \"18\",\n                \"19\",\n                \"20\",\n                \"21\",\n                \"22\",\n                \"23\",\n            ],\n        )\n\n        self.assertEqual(len(connections[\"default\"].queries), 3)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"default\"].queries), 0)\n\n        # Test to ensure we properly fill the cache, even if we get fewer\n        # results back (not a handled model) than the hit count indicates.\n        # This will hang indefinitely if broken.\n\n        # CharPK testing\n        old_ui = self.ui\n        self.ui.build(indexes=[self.cpkmmsi])\n        connections[\"default\"]._index = self.ui\n        self.cpkmmsi.update()\n\n        results = self.msqs.all()\n        loaded = [result.pk for result in results._manual_iter()]\n        self.assertEqual(loaded, [\"sometext\", \"1234\"])\n        self.assertEqual(len(connections[\"default\"].queries), 1)\n\n        # UUID testing\n        self.ui.build(indexes=[self.uuidmmsi])\n        connections[\"default\"]._index = self.ui\n        self.uuidmmsi.update()\n\n        results = self.msqs.all()\n        loaded = [result.pk for result in results._manual_iter()]\n        self.assertEqual(\n            loaded,\n            [\n                \"53554c58-7051-4350-bcc9-dad75eb248a9\",\n            
    \"77554c58-7051-4350-bcc9-dad75eb24888\",\n            ],\n        )\n\n        connections[\"default\"]._index = old_ui\n\n    def test_cache_is_full(self):\n        reset_search_queries()\n        self.assertEqual(len(connections[\"default\"].queries), 0)\n        self.assertEqual(self.msqs._cache_is_full(), False)\n        results = self.msqs.all()\n        fire_the_iterator_and_fill_cache = list(results)\n        self.assertEqual(23, len(fire_the_iterator_and_fill_cache))\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"default\"].queries), 4)\n\n    def test_all(self):\n        sqs = self.msqs.all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n\n    def test_filter(self):\n        sqs = self.msqs.filter(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 1)\n\n    def test_exclude(self):\n        sqs = self.msqs.exclude(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 1)\n\n    def test_order_by(self):\n        sqs = self.msqs.order_by(\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertTrue(\"foo\" in sqs.query.order_by)\n\n    def test_models(self):\n        # Stow.\n        old_unified_index = connections[\"default\"]._index\n        ui = UnifiedIndex()\n        bmmsi = BasicMockModelSearchIndex()\n        bammsi = BasicAnotherMockModelSearchIndex()\n        ui.build(indexes=[bmmsi, bammsi])\n        connections[\"default\"]._index = ui\n\n        msqs = SearchQuerySet()\n\n        sqs = msqs.all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.models), 0)\n\n        sqs = msqs.models(MockModel)\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.models), 1)\n\n        sqs = msqs.models(MockModel, 
AnotherMockModel)\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.models), 2)\n\n        # This will produce a warning.\n        ui.build(indexes=[bmmsi])\n        sqs = msqs.models(AnotherMockModel)\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.models), 1)\n\n    def test_result_class(self):\n        sqs = self.msqs.all()\n        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))\n\n        # Custom class.\n        class IttyBittyResult(object):\n            pass\n\n        sqs = self.msqs.result_class(IttyBittyResult)\n        self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))\n\n        # Reset to default.\n        sqs = self.msqs.result_class(None)\n        self.assertTrue(issubclass(sqs.query.result_class, SearchResult))\n\n    def test_boost(self):\n        sqs = self.msqs.boost(\"foo\", 10)\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.boost.keys()), 1)\n\n    def test_highlight(self):\n        sqs = self.msqs.highlight()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(sqs.query.highlight, True)\n\n    def test_spelling_override(self):\n        sqs = self.msqs.filter(content=\"not the spellchecking query\")\n        self.assertEqual(sqs.query.spelling_query, None)\n        sqs = self.msqs.set_spelling_query(\"override\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(sqs.query.spelling_query, \"override\")\n\n    def test_spelling_suggestions(self):\n        # Test the case where spelling support is disabled.\n        sqs = self.msqs.filter(content=\"Indx\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(sqs.spelling_suggestion(), None)\n        self.assertEqual(sqs.spelling_suggestion(\"indexy\"), None)\n\n    def test_raw_search(self):\n        
self.assertEqual(len(self.msqs.raw_search(\"foo\")), 23)\n        self.assertEqual(\n            len(\n                self.msqs.raw_search(\"(content__exact:hello AND content__exact:world)\")\n            ),\n            23,\n        )\n\n    def test_load_all(self):\n        # Models with character primary keys.\n        sqs = SearchQuerySet()\n        sqs.query.backend = CharPKMockSearchBackend(\"charpk\")\n        results = sqs.load_all().all()\n        self.assertEqual(len(results._result_cache), 0)\n        results._fill_cache(0, 2)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 2\n        )\n\n        # Models with uuid primary keys.\n        sqs = SearchQuerySet()\n        sqs.query.backend = UUIDMockSearchBackend(\"uuid\")\n        results = sqs.load_all().all()\n        self.assertEqual(len(results._result_cache), 0)\n        results._fill_cache(0, 2)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 2\n        )\n\n        # If nothing is handled, you get nothing.\n        old_ui = connections[\"default\"]._index\n        ui = UnifiedIndex()\n        ui.build(indexes=[])\n        connections[\"default\"]._index = ui\n\n        sqs = self.msqs.load_all()\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs), 0)\n\n        connections[\"default\"]._index = old_ui\n\n        # For full tests, see the solr_backend.\n\n    def test_load_all_read_queryset(self):\n        # Stow.\n        old_ui = connections[\"default\"]._index\n        ui = UnifiedIndex()\n        gafmmsi = GhettoAFifthMockModelSearchIndex()\n        ui.build(indexes=[gafmmsi])\n        connections[\"default\"]._index = ui\n        gafmmsi.update()\n\n        sqs = SearchQuerySet()\n        results = sqs.load_all().all()\n        results.query.backend = ReadQuerySetMockSearchBackend(\"default\")\n        results._fill_cache(0, 
2)\n\n        # The deleted result isn't returned\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 1\n        )\n\n        # Register a SearchIndex with a read_queryset that returns deleted items\n        rqstsi = TextReadQuerySetTestSearchIndex()\n        ui.build(indexes=[rqstsi])\n        rqstsi.update()\n\n        sqs = SearchQuerySet()\n        results = sqs.load_all().all()\n        results.query.backend = ReadQuerySetMockSearchBackend(\"default\")\n        results._fill_cache(0, 2)\n\n        # Both the deleted and not deleted items are returned\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 2\n        )\n\n        # Restore.\n        connections[\"default\"]._index = old_ui\n\n    def test_auto_query(self):\n        sqs = self.msqs.auto_query(\"test search -stuff\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter),\n            \"<SQ: AND content__content=test search -stuff>\",\n        )\n\n        sqs = self.msqs.auto_query('test \"my thing\" search -stuff')\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter),\n            '<SQ: AND content__content=test \"my thing\" search -stuff>',\n        )\n\n        sqs = self.msqs.auto_query(\"test \\\"my thing\\\" search 'moar quotes' -stuff\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter),\n            \"<SQ: AND content__content=test \\\"my thing\\\" search 'moar quotes' -stuff>\",\n        )\n\n        sqs = self.msqs.auto_query('test \"my thing\" search \\'moar quotes\\' \"foo -stuff')\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter),\n            '<SQ: AND 
content__content=test \"my thing\" search \\'moar quotes\\' \"foo -stuff>',\n        )\n\n        sqs = self.msqs.auto_query(\"test - stuff\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter), \"<SQ: AND content__content=test - stuff>\"\n        )\n\n        # Ensure bits in exact matches get escaped properly as well.\n        sqs = self.msqs.auto_query('\"pants:rule\"')\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter), '<SQ: AND content__content=\"pants:rule\">'\n        )\n\n        # Now with a different fieldname\n        sqs = self.msqs.auto_query(\"test search -stuff\", fieldname=\"title\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter), \"<SQ: AND title__content=test search -stuff>\"\n        )\n\n        sqs = self.msqs.auto_query('test \"my thing\" search -stuff', fieldname=\"title\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(\n            repr(sqs.query.query_filter),\n            '<SQ: AND title__content=test \"my thing\" search -stuff>',\n        )\n\n    def test_count(self):\n        self.assertEqual(self.msqs.count(), 23)\n\n    def test_facet_counts(self):\n        self.assertEqual(self.msqs.facet_counts(), {})\n\n    def test_best_match(self):\n        self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))\n\n    def test_latest(self):\n        self.assertTrue(isinstance(self.msqs.latest(\"pub_date\"), SearchResult))\n\n    def test_more_like_this(self):\n        mock = MockModel()\n        mock.id = 1\n\n        self.assertEqual(len(self.msqs.more_like_this(mock)), 23)\n\n    def test_facets(self):\n        sqs = self.msqs.facet(\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.facets), 1)\n\n        sqs2 = 
self.msqs.facet(\"foo\").facet(\"bar\")\n        self.assertTrue(isinstance(sqs2, SearchQuerySet))\n        self.assertEqual(len(sqs2.query.facets), 2)\n\n    def test_date_facets(self):\n        try:\n            sqs = self.msqs.date_facet(\n                \"foo\",\n                start_date=datetime.date(2008, 2, 25),\n                end_date=datetime.date(2009, 2, 25),\n                gap_by=\"smarblaph\",\n            )\n            self.fail()\n        except FacetingError as e:\n            self.assertEqual(\n                str(e),\n                \"The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.\",\n            )\n\n        sqs = self.msqs.date_facet(\n            \"foo\",\n            start_date=datetime.date(2008, 2, 25),\n            end_date=datetime.date(2009, 2, 25),\n            gap_by=\"month\",\n        )\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.date_facets), 1)\n\n        sqs2 = self.msqs.date_facet(\n            \"foo\",\n            start_date=datetime.date(2008, 2, 25),\n            end_date=datetime.date(2009, 2, 25),\n            gap_by=\"month\",\n        ).date_facet(\n            \"bar\",\n            start_date=datetime.date(2007, 2, 25),\n            end_date=datetime.date(2009, 2, 25),\n            gap_by=\"year\",\n        )\n        self.assertTrue(isinstance(sqs2, SearchQuerySet))\n        self.assertEqual(len(sqs2.query.date_facets), 2)\n\n    def test_query_facets(self):\n        sqs = self.msqs.query_facet(\"foo\", \"[bar TO *]\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_facets), 1)\n\n        sqs2 = self.msqs.query_facet(\"foo\", \"[bar TO *]\").query_facet(\n            \"bar\", \"[100 TO 499]\"\n        )\n        self.assertTrue(isinstance(sqs2, SearchQuerySet))\n        self.assertEqual(len(sqs2.query.query_facets), 2)\n\n        # Test multiple query 
facets on a single field\n        sqs3 = (\n            self.msqs.query_facet(\"foo\", \"[bar TO *]\")\n            .query_facet(\"bar\", \"[100 TO 499]\")\n            .query_facet(\"foo\", \"[1000 TO 1499]\")\n        )\n        self.assertTrue(isinstance(sqs3, SearchQuerySet))\n        self.assertEqual(len(sqs3.query.query_facets), 3)\n\n    def test_stats(self):\n        sqs = self.msqs.stats_facet(\"foo\", \"bar\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.stats), 1)\n\n        sqs2 = self.msqs.stats_facet(\"foo\", \"bar\").stats_facet(\"foo\", \"baz\")\n        self.assertTrue(isinstance(sqs2, SearchQuerySet))\n        self.assertEqual(len(sqs2.query.stats), 1)\n\n        sqs3 = self.msqs.stats_facet(\"foo\", \"bar\").stats_facet(\"moof\", \"baz\")\n        self.assertTrue(isinstance(sqs3, SearchQuerySet))\n        self.assertEqual(len(sqs3.query.stats), 2)\n\n    def test_narrow(self):\n        sqs = self.msqs.narrow(\"foo:moof\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.narrow_queries), 1)\n\n    def test_clone(self):\n        results = self.msqs.filter(foo=\"bar\", foo__lt=\"10\")\n\n        clone = results._clone()\n        self.assertTrue(isinstance(clone, SearchQuerySet))\n        self.assertEqual(str(clone.query), str(results.query))\n        self.assertEqual(clone._result_cache, [])\n        self.assertEqual(clone._result_count, None)\n        self.assertEqual(clone._cache_full, False)\n        self.assertEqual(clone._using, results._using)\n\n    def test_using(self):\n        sqs = SearchQuerySet(using=\"default\")\n        self.assertNotEqual(sqs.query, None)\n        self.assertEqual(sqs.query._using, \"default\")\n\n    def test_chaining(self):\n        sqs = self.msqs.filter(content=\"foo\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 1)\n\n        # A second 
instance should inherit none of the changes from above.\n        sqs = self.msqs.filter(content=\"bar\")\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 1)\n\n    def test_none(self):\n        sqs = self.msqs.none()\n        self.assertTrue(isinstance(sqs, EmptySearchQuerySet))\n        self.assertEqual(len(sqs), 0)\n\n    def test___and__(self):\n        sqs1 = self.msqs.filter(content=\"foo\")\n        sqs2 = self.msqs.filter(content=\"bar\")\n        sqs = sqs1 & sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n\n    def test___or__(self):\n        sqs1 = self.msqs.filter(content=\"foo\")\n        sqs2 = self.msqs.filter(content=\"bar\")\n        sqs = sqs1 | sqs2\n\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.query_filter), 2)\n\n    def test_and_or(self):\n        \"\"\"\n        Combining AND queries with OR should give\n            AND(OR(a, b), OR(c, d))\n        \"\"\"\n        sqs1 = self.msqs.filter(content=\"foo\").filter(content=\"oof\")\n        sqs2 = self.msqs.filter(content=\"bar\").filter(content=\"rab\")\n        sqs = sqs1 | sqs2\n\n        self.assertEqual(sqs.query.query_filter.connector, \"OR\")\n        self.assertEqual(\n            repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)\n        )\n        self.assertEqual(\n            repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)\n        )\n\n    def test_or_and(self):\n        \"\"\"\n        Combining OR queries with AND should give\n            OR(AND(a, b), AND(c, d))\n        \"\"\"\n        sqs1 = self.msqs.filter(content=\"foo\").filter_or(content=\"oof\")\n        sqs2 = self.msqs.filter(content=\"bar\").filter_or(content=\"rab\")\n        sqs = sqs1 & sqs2\n\n        self.assertEqual(sqs.query.query_filter.connector, \"AND\")\n        
self.assertEqual(\n            repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)\n        )\n        self.assertEqual(\n            repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)\n        )\n\n\nclass ValuesQuerySetTestCase(SearchQuerySetTestCase):\n    def test_values_sqs(self):\n        sqs = self.msqs.auto_query(\"test\").values(\"id\")\n        self.assert_(isinstance(sqs, ValuesSearchQuerySet))\n\n        # We'll do a basic test to confirm that slicing works as expected:\n        self.assert_(isinstance(sqs[0], dict))\n        self.assert_(isinstance(sqs[0:5][0], dict))\n\n    def test_valueslist_sqs(self):\n        sqs = self.msqs.auto_query(\"test\").values_list(\"id\")\n\n        self.assert_(isinstance(sqs, ValuesListSearchQuerySet))\n        self.assert_(isinstance(sqs[0], (list, tuple)))\n        self.assert_(isinstance(sqs[0:1][0], (list, tuple)))\n\n        self.assertRaises(\n            TypeError,\n            self.msqs.auto_query(\"test\").values_list,\n            \"id\",\n            \"score\",\n            flat=True,\n        )\n\n        flat_sqs = self.msqs.auto_query(\"test\").values_list(\"id\", flat=True)\n        self.assert_(isinstance(sqs, ValuesListSearchQuerySet))\n\n        # Note that this will actually be None because a mocked sqs lacks\n        # anything else:\n        self.assert_(flat_sqs[0] is None)\n        self.assert_(flat_sqs[0:1][0] is None)\n\n\nclass EmptySearchQuerySetTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.esqs = EmptySearchQuerySet()\n\n    def test_get_count(self):\n        self.assertEqual(self.esqs.count(), 0)\n        self.assertEqual(len(self.esqs.all()), 0)\n\n    def test_filter(self):\n        sqs = self.esqs.filter(content=\"foo\")\n        self.assertTrue(isinstance(sqs, EmptySearchQuerySet))\n        self.assertEqual(len(sqs), 0)\n\n    def test_exclude(self):\n        sqs = self.esqs.exclude(content=\"foo\")\n        
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))\n        self.assertEqual(len(sqs), 0)\n\n    def test_slice(self):\n        sqs = self.esqs.filter(content=\"foo\")\n        self.assertTrue(isinstance(sqs, EmptySearchQuerySet))\n        self.assertEqual(len(sqs), 0)\n        self.assertEqual(sqs[:10], [])\n\n        try:\n            sqs[4]\n            self.fail()\n        except IndexError:\n            pass\n\n    def test_dictionary_lookup(self):\n        \"\"\"\n        Ensure doing a dictionary lookup raises a TypeError so\n        EmptySearchQuerySets can be used in templates.\n        \"\"\"\n        self.assertRaises(TypeError, lambda: self.esqs[\"count\"])\n\n\n@override_settings(DEBUG=True)\nclass PickleSearchQuerySetTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        super().setUp()\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.cpkmmsi = CharPKMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n        self.msqs = SearchQuerySet()\n\n        # Stow.\n        reset_search_queries()\n\n    def tearDown(self):\n        # Restore.\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_pickling(self):\n        results = self.msqs.all()\n\n        for res in results:\n            # Make sure the cache is full.\n            pass\n\n        in_a_pickle = pickle.dumps(results)\n        like_a_cuke = pickle.loads(in_a_pickle)\n        self.assertEqual(len(like_a_cuke), len(results))\n        self.assertEqual(like_a_cuke[0].id, results[0].id)\n"
  },
  {
    "path": "test_haystack/test_templatetags.py",
    "content": "from django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.template import Context, Template\nfrom django.test import TestCase\n\nfrom haystack.utils.highlighting import Highlighter\n\n\nclass BorkHighlighter(Highlighter):\n    def render_html(self, highlight_locations=None, start_offset=None, end_offset=None):\n        highlighted_chunk = self.text_block[start_offset:end_offset]\n\n        for word in self.query_words:\n            highlighted_chunk = highlighted_chunk.replace(word, \"Bork!\")\n\n        return highlighted_chunk\n\n\nclass TemplateTagTestCase(TestCase):\n    def render(self, template, context):\n        # Why on Earth does Django not have a TemplateTestCase yet?\n        t = Template(template)\n        c = Context(context)\n        return t.render(c)\n\n\nclass HighlightTestCase(TemplateTagTestCase):\n    def setUp(self):\n        super().setUp()\n        self.sample_entry = \"\"\"\nRegistering indexes in Haystack is very similar to registering models and\nModelAdmin classes in the Django admin site. If you want to override the default\nindexing behavior for your model you can specify your own SearchIndex class.\nThis is useful for ensuring that future-dated or non-live content is not indexed\nand searchable.\n\nEvery custom SearchIndex requires there be one and only one field with\ndocument=True. This is the primary field that will get passed to the backend\nfor indexing. For this field, you'll then need to create a template at\nsearch/indexes/myapp/note_text.txt. This allows you to customize the document\nthat will be passed to the search backend for indexing. A sample template might\nlook like.\n\nIn addition, you may specify other fields to be populated along with the\ndocument. In this case, we also index the user who authored the document as\nwell as the date the document was published. 
The variable you assign the\nSearchField to should directly map to the field your search backend is\nexpecting. You instantiate most search fields with a parameter that points to\nthe attribute of the object to populate that field with.\n\"\"\"\n\n    def test_simple(self):\n        template = \"\"\"{% load highlight %}{% highlight entry with query %}\"\"\"\n        context = {\"entry\": self.sample_entry, \"query\": \"index\"}\n        self.assertEqual(\n            self.render(template, context),\n            '...<span class=\"highlighted\">index</span>ing behavior for your model you can specify your own Search<span class=\"highlighted\">Index</span> class.\\nThis is useful for ensuring that future-dated or non-live content is not <span class=\"highlighted\">index</span>ed\\nand searchable.\\n\\nEvery custom Search<span class=\"highlighted\">Index</span> ...',\n        )\n\n        template = \"\"\"{% load highlight %}{% highlight entry with query html_tag \"div\" css_class \"foo\" max_length 100 %}\"\"\"\n        context = {\"entry\": self.sample_entry, \"query\": \"field\"}\n        self.assertEqual(\n            self.render(template, context),\n            '...<div class=\"foo\">field</div> with\\ndocument=True. This is the primary <div class=\"foo\">field</div> that will get passed to the backend\\nfor indexing...',\n        )\n\n        template = \"\"\"{% load highlight %}{% highlight entry with query html_tag \"div\" css_class \"foo\" max_length 100 %}\"\"\"\n        context = {\"entry\": self.sample_entry, \"query\": \"Haystack\"}\n        self.assertEqual(\n            self.render(template, context),\n            '...<div class=\"foo\">Haystack</div> is very similar to registering models and\\nModelAdmin classes in the Django admin site. 
If y...',\n        )\n\n        template = \"\"\"{% load highlight %}{% highlight \"xxxxxxxxxxxxx foo bbxxxxx foo\" with \"foo\" max_length 5 html_tag \"span\" %}\"\"\"\n        context = {}\n        self.assertEqual(\n            self.render(template, context),\n            '...<span class=\"highlighted\">foo</span> b...',\n        )\n\n    def test_custom(self):\n        # Stow.\n        old_custom_highlighter = getattr(settings, \"HAYSTACK_CUSTOM_HIGHLIGHTER\", None)\n        settings.HAYSTACK_CUSTOM_HIGHLIGHTER = \"not.here.FooHighlighter\"\n\n        template = \"\"\"{% load highlight %}{% highlight entry with query %}\"\"\"\n        context = {\"entry\": self.sample_entry, \"query\": \"index\"}\n        self.assertRaises(ImproperlyConfigured, self.render, template, context)\n\n        settings.HAYSTACK_CUSTOM_HIGHLIGHTER = (\n            \"test_haystack.test_templatetags.BorkHighlighter\"\n        )\n\n        template = \"\"\"{% load highlight %}{% highlight entry with query %}\"\"\"\n        context = {\"entry\": self.sample_entry, \"query\": \"index\"}\n        self.assertEqual(\n            self.render(template, context),\n            \"Bork!ing behavior for your model you can specify your own SearchIndex class.\\nThis is useful for ensuring that future-dated or non-live content is not Bork!ed\\nand searchable.\\n\\nEvery custom SearchIndex \",\n        )\n\n        # Restore.\n        settings.HAYSTACK_CUSTOM_HIGHLIGHTER = old_custom_highlighter\n"
  },
  {
    "path": "test_haystack/test_utils.py",
    "content": "from django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom haystack.utils import (\n    _lookup_identifier_method,\n    get_facet_field_name,\n    get_identifier,\n    log,\n)\nfrom haystack.utils.highlighting import Highlighter\nfrom test_haystack.core.models import MockModel\n\n\nclass GetIdentifierTestCase(TestCase):\n    def test_get_facet_field_name(self):\n        self.assertEqual(get_facet_field_name(\"id\"), \"id\")\n        self.assertEqual(get_facet_field_name(\"django_id\"), \"django_id\")\n        self.assertEqual(get_facet_field_name(\"django_ct\"), \"django_ct\")\n        self.assertEqual(get_facet_field_name(\"author\"), \"author_exact\")\n        self.assertEqual(get_facet_field_name(\"author_exact\"), \"author_exact_exact\")\n\n\nclass GetFacetFieldNameTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def test_get_identifier(self):\n        self.assertEqual(get_identifier(\"core.mockmodel.1\"), \"core.mockmodel.1\")\n\n        # Valid object.\n        mock = MockModel.objects.get(pk=1)\n        self.assertEqual(get_identifier(mock), \"core.mockmodel.1\")\n\n    @override_settings(\n        HAYSTACK_IDENTIFIER_METHOD=\"test_haystack.core.custom_identifier.get_identifier_method\"\n    )\n    def test_haystack_identifier_method(self):\n        # The custom implementation returns the MD-5 hash of the key value by\n        # default:\n        get_identifier = _lookup_identifier_method()\n        self.assertEqual(get_identifier(\"a.b.c\"), \"553f764f7b436175c0387e22b4a19213\")\n\n        # … but it also supports a custom override mechanism which would\n        # definitely fail with the default implementation:\n        class custom_id_class(object):\n            def get_custom_haystack_id(self):\n                return \"CUSTOM\"\n\n        self.assertEqual(get_identifier(custom_id_class()), \"CUSTOM\")\n\n    @override_settings(\n        
HAYSTACK_IDENTIFIER_METHOD=\"test_haystack.core.custom_identifier.not_there\"\n    )\n    def test_haystack_identifier_method_bad_path(self):\n        self.assertRaises(AttributeError, _lookup_identifier_method)\n\n    @override_settings(HAYSTACK_IDENTIFIER_METHOD=\"core.not_there.not_there\")\n    def test_haystack_identifier_method_bad_module(self):\n        self.assertRaises(ImportError, _lookup_identifier_method)\n\n\nclass HighlighterTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.document_1 = \"This is a test of the highlightable words detection. This is only a test. Were this an actual emergency, your text would have exploded in mid-air.\"\n        self.document_2 = (\n            \"The content of words in no particular order causes nothing to occur.\"\n        )\n        self.document_3 = \"%s %s\" % (self.document_1, self.document_2)\n\n    def test_find_highlightable_words(self):\n        highlighter = Highlighter(\"this test\")\n        highlighter.text_block = self.document_1\n        self.assertEqual(\n            highlighter.find_highlightable_words(),\n            {\"this\": [0, 53, 79], \"test\": [10, 68]},\n        )\n\n        # We don't stem for now.\n        highlighter = Highlighter(\"highlight tests\")\n        highlighter.text_block = self.document_1\n        self.assertEqual(\n            highlighter.find_highlightable_words(), {\"highlight\": [22], \"tests\": []}\n        )\n\n        # Ignore negated bits.\n        highlighter = Highlighter(\"highlight -test\")\n        highlighter.text_block = self.document_1\n        self.assertEqual(highlighter.find_highlightable_words(), {\"highlight\": [22]})\n\n    def test_find_window(self):\n        # The query doesn't matter for this method, so ignore it.\n        highlighter = Highlighter(\"\")\n        highlighter.text_block = self.document_1\n\n        # No query.\n        self.assertEqual(highlighter.find_window({}), (0, 200))\n\n        # Nothing found.\n      
  self.assertEqual(\n            highlighter.find_window({\"highlight\": [], \"tests\": []}), (0, 200)\n        )\n\n        # Simple cases.\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [0], \"tests\": [100]}), (0, 200)\n        )\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [99], \"tests\": [199]}), (99, 299)\n        )\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [0], \"tests\": [201]}), (0, 200)\n        )\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [203], \"tests\": [120]}), (120, 320)\n        )\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [], \"tests\": [100]}), (100, 300)\n        )\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [0], \"tests\": [80], \"moof\": [120]}),\n            (0, 200),\n        )\n\n        # Simple cases, with an outlier far outside the window.\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [0], \"tests\": [100, 450]}), (0, 200)\n        )\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [100], \"tests\": [220, 450]}),\n            (100, 300),\n        )\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [100], \"tests\": [350, 450]}),\n            (350, 550),\n        )\n        self.assertEqual(\n            highlighter.find_window(\n                {\"highlight\": [100], \"tests\": [220], \"moof\": [450]}\n            ),\n            (100, 300),\n        )\n\n        # Density checks.\n        self.assertEqual(\n            highlighter.find_window({\"highlight\": [0], \"tests\": [100, 180, 450]}),\n            (0, 200),\n        )\n        self.assertEqual(\n            highlighter.find_window(\n                {\"highlight\": [0, 40], \"tests\": [100, 200, 220, 450]}\n            ),\n            (40, 240),\n        )\n        
self.assertEqual(\n            highlighter.find_window(\n                {\"highlight\": [0, 40], \"tests\": [100, 200, 220], \"moof\": [450]}\n            ),\n            (40, 240),\n        )\n        self.assertEqual(\n            highlighter.find_window(\n                {\n                    \"highlight\": [0, 40],\n                    \"tests\": [100, 200, 220],\n                    \"moof\": [294, 299, 450],\n                }\n            ),\n            (100, 300),\n        )\n\n    def test_render_html(self):\n        highlighter = Highlighter(\"this test\")\n        highlighter.text_block = self.document_1\n        self.assertEqual(\n            highlighter.render_html({\"this\": [0, 53, 79], \"test\": [10, 68]}, 0, 200),\n            '<span class=\"highlighted\">This</span> is a <span class=\"highlighted\">test</span> of the highlightable words detection. <span class=\"highlighted\">This</span> is only a <span class=\"highlighted\">test</span>. Were <span class=\"highlighted\">this</span> an actual emergency, your text would have exploded in mid-air.',\n        )\n\n        highlighter.text_block = self.document_2\n        self.assertEqual(\n            highlighter.render_html({\"this\": [0, 53, 79], \"test\": [10, 68]}, 0, 200),\n            \"The content of words in no particular order causes nothing to occur.\",\n        )\n\n        highlighter.text_block = self.document_3\n        self.assertEqual(\n            highlighter.render_html({\"this\": [0, 53, 79], \"test\": [10, 68]}, 0, 200),\n            '<span class=\"highlighted\">This</span> is a <span class=\"highlighted\">test</span> of the highlightable words detection. <span class=\"highlighted\">This</span> is only a <span class=\"highlighted\">test</span>. Were <span class=\"highlighted\">this</span> an actual emergency, your text would have exploded in mid-air. 
The content of words in no particular order causes no...',\n        )\n\n        highlighter = Highlighter(\"content detection\")\n        highlighter.text_block = self.document_3\n        self.assertEqual(\n            highlighter.render_html({\"content\": [151], \"detection\": [42]}, 42, 242),\n            '...<span class=\"highlighted\">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. The <span class=\"highlighted\">content</span> of words in no particular order causes nothing to occur.',\n        )\n\n        self.assertEqual(\n            highlighter.render_html({\"content\": [151], \"detection\": [42]}, 42, 200),\n            '...<span class=\"highlighted\">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. The <span class=\"highlighted\">content</span> of words in no particular order causes no...',\n        )\n\n        # One term found within another term.\n        highlighter = Highlighter(\"this is\")\n        highlighter.text_block = self.document_1\n        self.assertEqual(\n            highlighter.render_html(\n                {\"this\": [0, 53, 79], \"is\": [2, 5, 55, 58, 81]}, 0, 200\n            ),\n            '<span class=\"highlighted\">This</span> <span class=\"highlighted\">is</span> a test of the highlightable words detection. <span class=\"highlighted\">This</span> <span class=\"highlighted\">is</span> only a test. 
Were <span class=\"highlighted\">this</span> an actual emergency, your text would have exploded in mid-air.',\n        )\n\n        # Regression for repetition in the regular expression.\n        highlighter = Highlighter(\"i++\")\n        highlighter.text_block = \"Foo is i++ in most cases.\"\n        self.assertEqual(\n            highlighter.render_html({\"i++\": [7]}, 0, 200),\n            'Foo is <span class=\"highlighted\">i++</span> in most cases.',\n        )\n        highlighter = Highlighter(\"i**\")\n        highlighter.text_block = \"Foo is i** in most cases.\"\n        self.assertEqual(\n            highlighter.render_html({\"i**\": [7]}, 0, 200),\n            'Foo is <span class=\"highlighted\">i**</span> in most cases.',\n        )\n        highlighter = Highlighter(\"i..\")\n        highlighter.text_block = \"Foo is i.. in most cases.\"\n        self.assertEqual(\n            highlighter.render_html({\"i..\": [7]}, 0, 200),\n            'Foo is <span class=\"highlighted\">i..</span> in most cases.',\n        )\n        highlighter = Highlighter(\"i??\")\n        highlighter.text_block = \"Foo is i?? 
in most cases.\"\n        self.assertEqual(\n            highlighter.render_html({\"i??\": [7]}, 0, 200),\n            'Foo is <span class=\"highlighted\">i??</span> in most cases.',\n        )\n\n        # Regression for highlighting already highlighted HTML terms.\n        highlighter = Highlighter(\"span\")\n        highlighter.text_block = \"A span in spam makes html in a can.\"\n        self.assertEqual(\n            highlighter.render_html({\"span\": [2]}, 0, 200),\n            'A <span class=\"highlighted\">span</span> in spam makes html in a can.',\n        )\n\n        highlighter = Highlighter(\"highlight\")\n        highlighter.text_block = \"A span in spam makes highlighted html in a can.\"\n        self.assertEqual(\n            highlighter.render_html({\"highlight\": [21]}, 0, 200),\n            'A span in spam makes <span class=\"highlighted\">highlight</span>ed html in a can.',\n        )\n\n    def test_highlight(self):\n        highlighter = Highlighter(\"this test\")\n        self.assertEqual(\n            highlighter.highlight(self.document_1),\n            '<span class=\"highlighted\">This</span> is a <span class=\"highlighted\">test</span> of the highlightable words detection. <span class=\"highlighted\">This</span> is only a <span class=\"highlighted\">test</span>. Were <span class=\"highlighted\">this</span> an actual emergency, your text would have exploded in mid-air.',\n        )\n        self.assertEqual(\n            highlighter.highlight(self.document_2),\n            \"The content of words in no particular order causes nothing to occur.\",\n        )\n        self.assertEqual(\n            highlighter.highlight(self.document_3),\n            '<span class=\"highlighted\">This</span> is a <span class=\"highlighted\">test</span> of the highlightable words detection. <span class=\"highlighted\">This</span> is only a <span class=\"highlighted\">test</span>. 
Were <span class=\"highlighted\">this</span> an actual emergency, your text would have exploded in mid-air. The content of words in no particular order causes no...',\n        )\n\n        highlighter = Highlighter(\"this test\", html_tag=\"div\", css_class=None)\n        self.assertEqual(\n            highlighter.highlight(self.document_1),\n            \"<div>This</div> is a <div>test</div> of the highlightable words detection. <div>This</div> is only a <div>test</div>. Were <div>this</div> an actual emergency, your text would have exploded in mid-air.\",\n        )\n        self.assertEqual(\n            highlighter.highlight(self.document_2),\n            \"The content of words in no particular order causes nothing to occur.\",\n        )\n        self.assertEqual(\n            highlighter.highlight(self.document_3),\n            \"<div>This</div> is a <div>test</div> of the highlightable words detection. <div>This</div> is only a <div>test</div>. Were <div>this</div> an actual emergency, your text would have exploded in mid-air. The content of words in no particular order causes no...\",\n        )\n\n        highlighter = Highlighter(\"content detection\")\n        self.assertEqual(\n            highlighter.highlight(self.document_1),\n            '...<span class=\"highlighted\">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air.',\n        )\n        self.assertEqual(\n            highlighter.highlight(self.document_2),\n            '...<span class=\"highlighted\">content</span> of words in no particular order causes nothing to occur.',\n        )\n        self.assertEqual(\n            highlighter.highlight(self.document_3),\n            '...<span class=\"highlighted\">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. 
The <span class=\"highlighted\">content</span> of words in no particular order causes nothing to occur.',\n        )\n\n        highlighter = Highlighter(\"content detection\", max_length=100)\n        self.assertEqual(\n            highlighter.highlight(self.document_1),\n            '...<span class=\"highlighted\">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-...',\n        )\n        self.assertEqual(\n            highlighter.highlight(self.document_2),\n            '...<span class=\"highlighted\">content</span> of words in no particular order causes nothing to occur.',\n        )\n        self.assertEqual(\n            highlighter.highlight(self.document_3),\n            'This is a test of the highlightable words <span class=\"highlighted\">detection</span>. This is only a test. Were this an actual emerge...',\n        )\n\n\nclass LoggingFacadeTestCase(TestCase):\n    def test_everything_noops_if_settings_are_off(self):\n        with self.settings(HAYSTACK_LOGGING=False):\n            l = log.LoggingFacade(None)\n            l.error()\n\n    def test_uses_provided_logger_if_logging_is_on(self):\n        with self.settings(HAYSTACK_LOGGING=True):\n            l = log.LoggingFacade(None)\n            try:\n                l.error()\n            except AttributeError:\n                pass\n\n    def test_uses_provided_logger_by_default(self):\n        class Logger(object):\n            def __init__(self):\n                self.was_called = False\n\n            def error(self):\n                self.was_called = True\n\n        l = log.LoggingFacade(Logger())\n        self.assertFalse(l.was_called, msg=\"sanity check\")\n        l.error()\n        self.assertTrue(l.was_called)\n"
  },
  {
    "path": "test_haystack/test_views.py",
    "content": "import queue\nimport time\nfrom threading import Thread\n\nfrom django import forms\nfrom django.http import HttpRequest, QueryDict\nfrom django.test import TestCase, override_settings\nfrom django.urls import reverse\n\nfrom haystack import connections, indexes\nfrom haystack.forms import FacetedSearchForm, ModelSearchForm, SearchForm\nfrom haystack.query import EmptySearchQuerySet\nfrom haystack.utils.loading import UnifiedIndex\nfrom haystack.views import FacetedSearchView, SearchView, search_view_factory\nfrom test_haystack.core.models import AnotherMockModel, MockModel\n\n\nclass InitialedSearchForm(SearchForm):\n    q = forms.CharField(initial=\"Search for...\", required=False, label=\"Search\")\n\n\nclass BasicMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):\n    def get_model(self):\n        return MockModel\n\n\nclass BasicAnotherMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):\n    def get_model(self):\n        return AnotherMockModel\n\n\nclass SearchViewTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.bammsi = BasicAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi, self.bammsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n    def tearDown(self):\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_search_no_query(self):\n        response = self.client.get(reverse(\"haystack_search\"))\n        self.assertEqual(response.status_code, 200)\n\n    def test_search_query(self):\n        response = 
self.client.get(reverse(\"haystack_search\"), {\"q\": \"haystack\"})\n        self.assertEqual(response.status_code, 200)\n        self.assertIn(\"page\", response.context)\n        self.assertNotIn(\"page_obj\", response.context)\n        self.assertEqual(len(response.context[-1][\"page\"].object_list), 3)\n        self.assertEqual(\n            response.context[-1][\"page\"].object_list[0].content_type(), \"core.mockmodel\"\n        )\n        self.assertEqual(response.context[-1][\"page\"].object_list[0].pk, \"1\")\n\n    def test_invalid_page(self):\n        response = self.client.get(\n            reverse(\"haystack_search\"), {\"q\": \"haystack\", \"page\": \"165233\"}\n        )\n        self.assertEqual(response.status_code, 404)\n\n    def test_empty_results(self):\n        sv = SearchView()\n        sv.request = HttpRequest()\n        sv.form = sv.build_form()\n        self.assertTrue(isinstance(sv.get_results(), EmptySearchQuerySet))\n\n    def test_initial_data(self):\n        sv = SearchView(form_class=InitialedSearchForm)\n        sv.request = HttpRequest()\n        form = sv.build_form()\n        self.assertTrue(isinstance(form, InitialedSearchForm))\n        self.assertEqual(form.fields[\"q\"].initial, \"Search for...\")\n        para = form.as_p()\n        self.assertTrue('<label for=\"id_q\">Search:</label>' in para)\n        self.assertTrue('value=\"Search for...\"' in para)\n\n    def test_pagination(self):\n        response = self.client.get(\n            reverse(\"haystack_search\"), {\"q\": \"haystack\", \"page\": 0}\n        )\n        self.assertEqual(response.status_code, 404)\n        response = self.client.get(\n            reverse(\"haystack_search\"), {\"q\": \"haystack\", \"page\": 1}\n        )\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(len(response.context[-1][\"page\"].object_list), 3)\n        response = self.client.get(\n            reverse(\"haystack_search\"), {\"q\": \"haystack\", \"page\": 
2}\n        )\n        self.assertEqual(response.status_code, 404)\n\n    def test_thread_safety(self):\n        exceptions = []\n\n        def threaded_view(resp_queue, view, request):\n            time.sleep(2)\n\n            try:\n                view(request)\n                resp_queue.put(request.GET[\"name\"])\n            except Exception as e:\n                exceptions.append(e)\n                raise\n\n        class ThreadedSearchView(SearchView):\n            def __call__(self, request):\n                print(\"Name: %s\" % request.GET[\"name\"])\n                return super().__call__(request)\n\n        view = search_view_factory(view_class=ThreadedSearchView)\n        resp_queue = queue.Queue()\n        request_1 = HttpRequest()\n        request_1.GET = {\"name\": \"foo\"}\n        request_2 = HttpRequest()\n        request_2.GET = {\"name\": \"bar\"}\n\n        th1 = Thread(target=threaded_view, args=(resp_queue, view, request_1))\n        th2 = Thread(target=threaded_view, args=(resp_queue, view, request_2))\n\n        th1.start()\n        th2.start()\n        th1.join()\n        th2.join()\n\n        foo = resp_queue.get()\n        bar = resp_queue.get()\n        self.assertNotEqual(foo, bar)\n\n    def test_spelling(self):\n        # Stow.\n        from django.conf import settings\n\n        old = settings.HAYSTACK_CONNECTIONS[\"default\"].get(\"INCLUDE_SPELLING\", None)\n\n        settings.HAYSTACK_CONNECTIONS[\"default\"][\"INCLUDE_SPELLING\"] = True\n\n        sv = SearchView()\n        sv.query = \"Nothing\"\n        sv.results = []\n        sv.build_page = lambda: (None, None)\n        sv.create_response()\n        context = sv.get_context()\n\n        self.assertIn(\n            \"suggestion\",\n            context,\n            msg=\"Spelling suggestions should be present even if\"\n            \" no results were returned\",\n        )\n        self.assertEqual(context[\"suggestion\"], None)\n\n        # Restore\n        
settings.HAYSTACK_CONNECTIONS[\"default\"][\"INCLUDE_SPELLING\"] = old\n\n        if old is None:\n            del settings.HAYSTACK_CONNECTIONS[\"default\"][\"INCLUDE_SPELLING\"]\n\n\n@override_settings(ROOT_URLCONF=\"test_haystack.results_per_page_urls\")\nclass ResultsPerPageTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.bammsi = BasicAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi, self.bammsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n    def tearDown(self):\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_custom_results_per_page(self):\n        response = self.client.get(\"/search/\", {\"q\": \"haystack\"})\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(len(response.context[-1][\"page\"].object_list), 1)\n        self.assertEqual(response.context[-1][\"paginator\"].per_page, 1)\n\n        response = self.client.get(\"/search2/\", {\"q\": \"hello world\"})\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(len(response.context[-1][\"page\"].object_list), 2)\n        self.assertEqual(response.context[-1][\"paginator\"].per_page, 2)\n\n\nclass FacetedSearchViewTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        self.bammsi = BasicAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi, 
self.bammsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n    def tearDown(self):\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_search_no_query(self):\n        response = self.client.get(reverse(\"haystack_faceted_search\"))\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(response.context[\"facets\"], {})\n\n    def test_empty_results(self):\n        fsv = FacetedSearchView()\n        fsv.request = HttpRequest()\n        fsv.request.GET = QueryDict(\"\")\n        fsv.form = fsv.build_form()\n        self.assertTrue(isinstance(fsv.get_results(), EmptySearchQuerySet))\n\n    def test_default_form(self):\n        fsv = FacetedSearchView()\n        fsv.request = HttpRequest()\n        fsv.request.GET = QueryDict(\"\")\n        fsv.form = fsv.build_form()\n        self.assertTrue(isinstance(fsv.form, FacetedSearchForm))\n\n    def test_list_selected_facets(self):\n        fsv = FacetedSearchView()\n        fsv.request = HttpRequest()\n        fsv.request.GET = QueryDict(\"\")\n        fsv.form = fsv.build_form()\n        self.assertEqual(fsv.form.selected_facets, [])\n\n        fsv = FacetedSearchView()\n        fsv.request = HttpRequest()\n        fsv.request.GET = QueryDict(\n            \"selected_facets=author:daniel&selected_facets=author:chris\"\n        )\n        fsv.form = fsv.build_form()\n        self.assertEqual(fsv.form.selected_facets, [\"author:daniel\", \"author:chris\"])\n\n\nclass BasicSearchViewTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_unified_index = connections[\"default\"]._index\n        self.ui = UnifiedIndex()\n        self.bmmsi = BasicMockModelSearchIndex()\n        
self.bammsi = BasicAnotherMockModelSearchIndex()\n        self.ui.build(indexes=[self.bmmsi, self.bammsi])\n        connections[\"default\"]._index = self.ui\n\n        # Update the \"index\".\n        backend = connections[\"default\"].get_backend()\n        backend.clear()\n        backend.update(self.bmmsi, MockModel.objects.all())\n\n    def tearDown(self):\n        connections[\"default\"]._index = self.old_unified_index\n        super().tearDown()\n\n    def test_search_no_query(self):\n        response = self.client.get(reverse(\"haystack_basic_search\"))\n        self.assertEqual(response.status_code, 200)\n\n    def test_search_query(self):\n        response = self.client.get(reverse(\"haystack_basic_search\"), {\"q\": \"haystack\"})\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(type(response.context[-1][\"form\"]), ModelSearchForm)\n        self.assertEqual(len(response.context[-1][\"page\"].object_list), 3)\n        self.assertEqual(\n            response.context[-1][\"page\"].object_list[0].content_type(), \"core.mockmodel\"\n        )\n        self.assertEqual(response.context[-1][\"page\"].object_list[0].pk, \"1\")\n        self.assertEqual(response.context[-1][\"query\"], \"haystack\")\n\n    def test_invalid_page(self):\n        response = self.client.get(\n            reverse(\"haystack_basic_search\"), {\"q\": \"haystack\", \"page\": \"165233\"}\n        )\n        self.assertEqual(response.status_code, 404)\n"
  },
  {
    "path": "test_haystack/utils.py",
    "content": "import unittest\n\nfrom django.conf import settings\n\n\ndef check_solr(using=\"solr\"):\n    try:\n        from pysolr import Solr, SolrError\n    except ImportError:\n        raise unittest.SkipTest(\"pysolr not installed.\")\n\n    solr = Solr(settings.HAYSTACK_CONNECTIONS[using][\"URL\"])\n    try:\n        solr.search(\"*:*\")\n    except SolrError as e:\n        raise unittest.SkipTest(\n            \"solr not running on %r\" % settings.HAYSTACK_CONNECTIONS[using][\"URL\"], e\n        )\n"
  },
  {
    "path": "test_haystack/whoosh_tests/__init__.py",
    "content": "import warnings\n\nwarnings.simplefilter(\"ignore\", Warning)\n"
  },
  {
    "path": "test_haystack/whoosh_tests/test_forms.py",
    "content": "\"\"\"Tests for Whoosh spelling suggestions\"\"\"\nfrom django.conf import settings\nfrom django.http import HttpRequest\n\nfrom haystack.forms import SearchForm\nfrom haystack.query import SearchQuerySet\nfrom haystack.views import SearchView\n\nfrom .test_whoosh_backend import LiveWhooshRoundTripTestCase\n\n\nclass SpellingSuggestionTestCase(LiveWhooshRoundTripTestCase):\n    fixtures = [\"base_data\"]\n\n    def setUp(self):\n        self.old_spelling_setting = settings.HAYSTACK_CONNECTIONS[\"whoosh\"].get(\n            \"INCLUDE_SPELLING\", False\n        )\n        settings.HAYSTACK_CONNECTIONS[\"whoosh\"][\"INCLUDE_SPELLING\"] = True\n\n        super().setUp()\n\n    def tearDown(self):\n        settings.HAYSTACK_CONNECTIONS[\"whoosh\"][\n            \"INCLUDE_SPELLING\"\n        ] = self.old_spelling_setting\n        super().tearDown()\n\n    def test_form_suggestion(self):\n        form = SearchForm({\"q\": \"exampl\"}, searchqueryset=SearchQuerySet(\"whoosh\"))\n        self.assertEqual(form.get_suggestion(), \"example\")\n\n    def test_view_suggestion(self):\n        view = SearchView(\n            template=\"test_suggestion.html\", searchqueryset=SearchQuerySet(\"whoosh\")\n        )\n        mock = HttpRequest()\n        mock.GET[\"q\"] = \"exampl\"\n        resp = view(mock)\n        self.assertEqual(resp.content, b\"Suggestion: example\")\n"
  },
  {
    "path": "test_haystack/whoosh_tests/test_inputs.py",
    "content": "from django.test import TestCase\n\nfrom haystack import connections, inputs\n\n\nclass WhooshInputTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n        self.query_obj = connections[\"whoosh\"].get_query()\n\n    def test_raw_init(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {})\n        self.assertEqual(raw.post_process, False)\n\n        raw = inputs.Raw(\"hello OR there, :you\", test=\"really\")\n        self.assertEqual(raw.query_string, \"hello OR there, :you\")\n        self.assertEqual(raw.kwargs, {\"test\": \"really\"})\n        self.assertEqual(raw.post_process, False)\n\n    def test_raw_prepare(self):\n        raw = inputs.Raw(\"hello OR there, :you\")\n        self.assertEqual(raw.prepare(self.query_obj), \"hello OR there, :you\")\n\n    def test_clean_init(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.query_string, \"hello OR there, :you\")\n        self.assertEqual(clean.post_process, True)\n\n    def test_clean_prepare(self):\n        clean = inputs.Clean(\"hello OR there, :you\")\n        self.assertEqual(clean.prepare(self.query_obj), \"hello or there, ':you'\")\n\n    def test_exact_init(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.query_string, \"hello OR there, :you\")\n        self.assertEqual(exact.post_process, True)\n\n    def test_exact_prepare(self):\n        exact = inputs.Exact(\"hello OR there, :you\")\n        self.assertEqual(exact.prepare(self.query_obj), '\"hello OR there, :you\"')\n\n    def test_not_init(self):\n        not_it = inputs.Not(\"hello OR there, :you\")\n        self.assertEqual(not_it.query_string, \"hello OR there, :you\")\n        self.assertEqual(not_it.post_process, True)\n\n    def test_not_prepare(self):\n        not_it = inputs.Not(\"hello OR there, 
:you\")\n        self.assertEqual(not_it.prepare(self.query_obj), \"NOT (hello or there, ':you')\")\n\n    def test_autoquery_init(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.query_string, 'panic -don\\'t \"froody dude\"')\n        self.assertEqual(autoquery.post_process, False)\n\n    def test_autoquery_prepare(self):\n        autoquery = inputs.AutoQuery('panic -don\\'t \"froody dude\"')\n        self.assertEqual(\n            autoquery.prepare(self.query_obj), 'panic NOT don\\'t \"froody dude\"'\n        )\n\n    def test_altparser_init(self):\n        altparser = inputs.AltParser(\"dismax\")\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"\")\n        self.assertEqual(altparser.kwargs, {})\n        self.assertEqual(altparser.post_process, False)\n\n        altparser = inputs.AltParser(\"dismax\", \"douglas adams\", qf=\"author\", mm=1)\n        self.assertEqual(altparser.parser_name, \"dismax\")\n        self.assertEqual(altparser.query_string, \"douglas adams\")\n        self.assertEqual(altparser.kwargs, {\"mm\": 1, \"qf\": \"author\"})\n        self.assertEqual(altparser.post_process, False)\n\n    def test_altparser_prepare(self):\n        altparser = inputs.AltParser(\"hello OR there, :you\")\n        # Not supported on that backend.\n        self.assertEqual(altparser.prepare(self.query_obj), \"\")\n"
  },
  {
    "path": "test_haystack/whoosh_tests/test_whoosh_backend.py",
    "content": "import os\nimport unittest\nfrom datetime import timedelta\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\nfrom django.utils.datetime_safe import date, datetime\nfrom whoosh.analysis import SpaceSeparatedTokenizer, SubstitutionFilter\nfrom whoosh.fields import BOOLEAN, DATETIME, KEYWORD, NUMERIC, TEXT\nfrom whoosh.qparser import QueryParser\n\nfrom haystack import connections, indexes, reset_search_queries\nfrom haystack.exceptions import SearchBackendError, SkipDocument\nfrom haystack.inputs import AutoQuery\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, SearchQuerySet\nfrom haystack.utils.loading import UnifiedIndex\n\nfrom ..core.models import AFourthMockModel, AnotherMockModel, MockModel\nfrom ..mocks import MockSearchResult\nfrom .testcases import WhooshTestCase\n\n\nclass WhooshMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    name_analyzed = indexes.CharField(\n        model_attr=\"author\",\n        analyzer=SpaceSeparatedTokenizer() | SubstitutionFilter(r\"\\d+\", \"\"),\n    )\n\n    def get_model(self):\n        return MockModel\n\n\nclass WhooshMockSearchIndexWithSkipDocument(WhooshMockSearchIndex):\n    def prepare_text(self, obj):\n        if obj.author == \"daniel3\":\n            raise SkipDocument\n        return obj.author\n\n\nclass WhooshAnotherMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return AnotherMockModel\n\n    def prepare_text(self, obj):\n        return obj.author\n\n\nclass 
AllTypesWhooshMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, use_template=True)\n    name = indexes.CharField(model_attr=\"author\", indexed=False)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    sites = indexes.MultiValueField()\n    seen_count = indexes.IntegerField(indexed=False)\n    is_active = indexes.BooleanField(default=True)\n\n    def get_model(self):\n        return MockModel\n\n\nclass WhooshMaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True)\n    month = indexes.CharField(indexed=False)\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return MockModel\n\n    def prepare_text(self, obj):\n        return \"Indexed!\\n%s\" % obj.pk\n\n    def prepare_month(self, obj):\n        return \"%02d\" % obj.pub_date.month\n\n\nclass WhooshBoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(\n        document=True,\n        use_template=True,\n        template_name=\"search/indexes/core/mockmodel_template.txt\",\n    )\n    author = indexes.CharField(model_attr=\"author\", weight=2.0)\n    editor = indexes.CharField(model_attr=\"editor\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n\n    def get_model(self):\n        return AFourthMockModel\n\n    def prepare(self, obj):\n        data = super().prepare(obj)\n\n        if obj.pk % 2 == 0:\n            data[\"boost\"] = 2.0\n\n        return data\n\n\nclass WhooshAutocompleteMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(model_attr=\"foo\", document=True)\n    name = indexes.CharField(model_attr=\"author\")\n    pub_date = indexes.DateTimeField(model_attr=\"pub_date\")\n    text_auto = indexes.EdgeNgramField(model_attr=\"foo\")\n    name_auto = indexes.EdgeNgramField(model_attr=\"author\")\n\n    def get_model(self):\n        
return MockModel\n\n\nclass WhooshSearchBackendTestCase(WhooshTestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wmmi = WhooshMockSearchIndex()\n        self.wmmidni = WhooshMockSearchIndexWithSkipDocument()\n        self.wmtmmi = WhooshMaintainTypeMockSearchIndex()\n        self.ui.build(indexes=[self.wmmi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        self.sb.setup()\n        self.raw_whoosh = self.sb.index\n        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)\n        self.sb.delete_index()\n\n        self.sample_objs = MockModel.objects.all()\n\n    def tearDown(self):\n        connections[\"whoosh\"]._index = self.old_ui\n        super().tearDown()\n\n    def whoosh_search(self, query):\n        self.raw_whoosh = self.raw_whoosh.refresh()\n        searcher = self.raw_whoosh.searcher()\n        return searcher.search(self.parser.parse(query), limit=1000)\n\n    def test_non_silent(self):\n        bad_sb = connections[\"whoosh\"].backend(\n            \"bad\", PATH=\"/tmp/bad_whoosh\", SILENTLY_FAIL=False\n        )\n        bad_sb.use_file_storage = False\n        bad_sb.storage = \"omg.wtf.bbq\"\n\n        try:\n            bad_sb.update(self.wmmi, self.sample_objs)\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.remove(\"core.mockmodel.1\")\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.clear()\n            self.fail()\n        except:\n            pass\n\n        try:\n            bad_sb.search(\"foo\")\n            self.fail()\n        except:\n            pass\n\n    def test_update(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        # Check what Whoosh thinks is there.\n        
self.assertEqual(len(self.whoosh_search(\"*\")), 23)\n        self.assertEqual(\n            [doc.fields()[\"id\"] for doc in self.whoosh_search(\"*\")],\n            [\"core.mockmodel.%s\" % i for i in range(1, 24)],\n        )\n\n    def test_update_with_SkipDocument_raised(self):\n        self.sb.update(self.wmmidni, self.sample_objs)\n\n        # Check what Whoosh thinks is there.\n        res = self.whoosh_search(\"*\")\n        self.assertEqual(len(res), 14)\n        ids = [1, 2, 5, 6, 7, 8, 9, 11, 12, 14, 15, 18, 20, 21]\n        self.assertListEqual(\n            [doc.fields()[\"id\"] for doc in res], [\"core.mockmodel.%s\" % i for i in ids]\n        )\n\n    def test_remove(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertEqual(self.sb.index.doc_count(), 23)\n\n        self.sb.remove(self.sample_objs[0])\n        self.assertEqual(self.sb.index.doc_count(), 22)\n\n    def test_clear(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertEqual(self.sb.index.doc_count(), 23)\n\n        self.sb.clear()\n        self.assertEqual(self.sb.index.doc_count(), 0)\n\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertEqual(self.sb.index.doc_count(), 23)\n\n        self.sb.clear([AnotherMockModel])\n        self.assertEqual(self.sb.index.doc_count(), 23)\n\n        self.sb.clear([MockModel])\n        self.assertEqual(self.sb.index.doc_count(), 0)\n\n        self.sb.index.refresh()\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertEqual(self.sb.index.doc_count(), 23)\n\n        self.sb.clear([AnotherMockModel, MockModel])\n        self.assertEqual(self.raw_whoosh.doc_count(), 0)\n\n    def test_search(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertEqual(len(self.whoosh_search(\"*\")), 23)\n\n        # No query string should always yield zero results.\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n\n       
 # A one letter query string gets nabbed by a stopwords filter. Should\n        # always yield zero results.\n        self.assertEqual(self.sb.search(\"a\"), {\"hits\": 0, \"results\": []})\n\n        # Possible AttributeError?\n        # self.assertEqual(self.sb.search(u'a b'), {'hits': 0, 'results': [], 'spelling_suggestion': '', 'facets': {}})\n\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 23)\n        self.assertEqual(\n            [result.pk for result in self.sb.search(\"*\")[\"results\"]],\n            [\"%s\" % i for i in range(1, 24)],\n        )\n\n        self.assertEqual(self.sb.search(\"Indexe\")[\"hits\"], 23)\n        self.assertEqual(self.sb.search(\"Indexe\")[\"spelling_suggestion\"], \"indexed\")\n\n        self.assertEqual(\n            self.sb.search(\"\", facets=[\"name\"]), {\"hits\": 0, \"results\": []}\n        )\n        results = self.sb.search(\"Index*\", facets=[\"name\"])\n        results = self.sb.search(\"index*\", facets=[\"name\"])\n        self.assertEqual(results[\"hits\"], 23)\n        self.assertEqual(results[\"facets\"], {})\n\n        self.assertEqual(\n            self.sb.search(\n                \"\",\n                date_facets={\n                    \"pub_date\": {\n                        \"start_date\": date(2008, 2, 26),\n                        \"end_date\": date(2008, 2, 26),\n                        \"gap\": \"/MONTH\",\n                    }\n                },\n            ),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\n            \"Index*\",\n            date_facets={\n                \"pub_date\": {\n                    \"start_date\": date(2008, 2, 26),\n                    \"end_date\": date(2008, 2, 26),\n                    \"gap\": \"/MONTH\",\n                }\n            },\n        )\n        results = self.sb.search(\n            \"index*\",\n            date_facets={\n                \"pub_date\": {\n                    \"start_date\": 
date(2008, 2, 26),\n                    \"end_date\": date(2008, 2, 26),\n                    \"gap\": \"/MONTH\",\n                }\n            },\n        )\n        self.assertEqual(results[\"hits\"], 23)\n        self.assertEqual(results[\"facets\"], {})\n\n        self.assertEqual(\n            self.sb.search(\"\", query_facets={\"name\": \"[* TO e]\"}),\n            {\"hits\": 0, \"results\": []},\n        )\n        results = self.sb.search(\"Index*\", query_facets={\"name\": \"[* TO e]\"})\n        results = self.sb.search(\"index*\", query_facets={\"name\": \"[* TO e]\"})\n        self.assertEqual(results[\"hits\"], 23)\n        self.assertEqual(results[\"facets\"], {})\n\n        # self.assertEqual(self.sb.search('', narrow_queries=set(['name:daniel1'])), {'hits': 0, 'results': []})\n        # results = self.sb.search('Index*', narrow_queries=set(['name:daniel1']))\n        # self.assertEqual(results['hits'], 1)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                self.sb.search(\"Index*\", result_class=MockSearchResult)[\"results\"][0],\n                MockSearchResult,\n            )\n        )\n\n        # Check the use of ``limit_to_registered_models``.\n        self.assertEqual(\n            self.sb.search(\"\", limit_to_registered_models=False),\n            {\"hits\": 0, \"results\": []},\n        )\n        self.assertEqual(\n            self.sb.search(\"*\", limit_to_registered_models=False)[\"hits\"], 23\n        )\n        self.assertEqual(\n            [\n                result.pk\n                for result in self.sb.search(\"*\", limit_to_registered_models=False)[\n                    \"results\"\n                ]\n            ],\n            [\"%s\" % i for i in range(1, 24)],\n        )\n\n        # Stow.\n        old_limit_to_registered_models = getattr(\n            settings, \"HAYSTACK_LIMIT_TO_REGISTERED_MODELS\", True\n        )\n        
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False\n\n        self.assertEqual(self.sb.search(\"\"), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"*\")[\"hits\"], 23)\n        self.assertEqual(\n            [result.pk for result in self.sb.search(\"*\")[\"results\"]],\n            [\"%s\" % i for i in range(1, 24)],\n        )\n\n        # Restore.\n        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models\n\n    def test_highlight(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertEqual(len(self.whoosh_search(\"*\")), 23)\n\n        self.assertEqual(self.sb.search(\"\", highlight=True), {\"hits\": 0, \"results\": []})\n        self.assertEqual(self.sb.search(\"index*\", highlight=True)[\"hits\"], 23)\n\n        query = self.sb.search(\"Index*\", highlight=True)[\"results\"]\n        result = [result.highlighted[\"text\"][0] for result in query]\n\n        self.assertEqual(result, [\"<em>Indexed</em>!\\n%d\" % i for i in range(1, 24)])\n\n    def test_search_all_models(self):\n        wamsi = WhooshAnotherMockSearchIndex()\n        self.ui.build(indexes=[self.wmmi, wamsi])\n\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.sb.update(wamsi, AnotherMockModel.objects.all())\n\n        self.assertEqual(len(self.whoosh_search(\"*\")), 25)\n\n        self.ui.build(indexes=[self.wmmi])\n\n    def test_more_like_this(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertEqual(len(self.whoosh_search(\"*\")), 23)\n\n        # Now supported by Whoosh (as of 1.8.4). 
See the ``LiveWhooshMoreLikeThisTestCase``.\n        self.assertEqual(self.sb.more_like_this(self.sample_objs[0])[\"hits\"], 22)\n\n        # Make sure that swapping the ``result_class`` doesn't blow up.\n        try:\n            self.sb.more_like_this(self.sample_objs[0], result_class=MockSearchResult)\n        except:\n            self.fail()\n\n    def test_delete_index(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertTrue(self.sb.index.doc_count() > 0)\n\n        self.sb.delete_index()\n        self.assertEqual(self.sb.index.doc_count(), 0)\n\n    def test_order_by(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        results = self.sb.search(\"*\", sort_by=[\"pub_date\"])\n        self.assertEqual(\n            [result.pk for result in results[\"results\"]],\n            [\n                \"1\",\n                \"3\",\n                \"2\",\n                \"4\",\n                \"5\",\n                \"6\",\n                \"7\",\n                \"8\",\n                \"9\",\n                \"10\",\n                \"11\",\n                \"12\",\n                \"13\",\n                \"14\",\n                \"15\",\n                \"16\",\n                \"17\",\n                \"18\",\n                \"19\",\n                \"20\",\n                \"21\",\n                \"22\",\n                \"23\",\n            ],\n        )\n\n        results = self.sb.search(\"*\", sort_by=[\"-pub_date\"])\n        self.assertEqual(\n            [result.pk for result in results[\"results\"]],\n            [\n                \"23\",\n                \"22\",\n                \"21\",\n                \"20\",\n                \"19\",\n                \"18\",\n                \"17\",\n                \"16\",\n                \"15\",\n                \"14\",\n                \"13\",\n                \"12\",\n                \"11\",\n                \"10\",\n                \"9\",\n                
\"8\",\n                \"7\",\n                \"6\",\n                \"5\",\n                \"4\",\n                \"2\",\n                \"3\",\n                \"1\",\n            ],\n        )\n\n        results = self.sb.search(\"*\", sort_by=[\"id\"])\n        self.assertEqual(\n            [result.pk for result in results[\"results\"]],\n            [\n                \"1\",\n                \"10\",\n                \"11\",\n                \"12\",\n                \"13\",\n                \"14\",\n                \"15\",\n                \"16\",\n                \"17\",\n                \"18\",\n                \"19\",\n                \"2\",\n                \"20\",\n                \"21\",\n                \"22\",\n                \"23\",\n                \"3\",\n                \"4\",\n                \"5\",\n                \"6\",\n                \"7\",\n                \"8\",\n                \"9\",\n            ],\n        )\n\n        results = self.sb.search(\"*\", sort_by=[\"-id\"])\n        self.assertEqual(\n            [result.pk for result in results[\"results\"]],\n            [\n                \"9\",\n                \"8\",\n                \"7\",\n                \"6\",\n                \"5\",\n                \"4\",\n                \"3\",\n                \"23\",\n                \"22\",\n                \"21\",\n                \"20\",\n                \"2\",\n                \"19\",\n                \"18\",\n                \"17\",\n                \"16\",\n                \"15\",\n                \"14\",\n                \"13\",\n                \"12\",\n                \"11\",\n                \"10\",\n                \"1\",\n            ],\n        )\n\n        results = self.sb.search(\"*\", sort_by=[\"-pub_date\", \"-id\"])\n        self.assertEqual(\n            [result.pk for result in results[\"results\"]],\n            [\n                \"23\",\n                \"22\",\n                \"21\",\n                \"20\",\n  
              \"19\",\n                \"18\",\n                \"17\",\n                \"16\",\n                \"15\",\n                \"14\",\n                \"13\",\n                \"12\",\n                \"11\",\n                \"10\",\n                \"9\",\n                \"8\",\n                \"7\",\n                \"6\",\n                \"5\",\n                \"4\",\n                \"2\",\n                \"3\",\n                \"1\",\n            ],\n        )\n\n        self.assertRaises(\n            SearchBackendError, self.sb.search, \"*\", sort_by=[\"-pub_date\", \"id\"]\n        )\n\n    def test__from_python(self):\n        self.assertEqual(self.sb._from_python(\"abc\"), \"abc\")\n        self.assertEqual(self.sb._from_python(1), 1)\n        self.assertEqual(self.sb._from_python(2653), 2653)\n        self.assertEqual(self.sb._from_python(25.5), 25.5)\n        self.assertEqual(self.sb._from_python([1, 2, 3]), \"1,2,3\")\n        self.assertTrue(\"a': 1\" in self.sb._from_python({\"a\": 1, \"c\": 3, \"b\": 2}))\n        self.assertEqual(\n            self.sb._from_python(datetime(2009, 5, 9, 16, 14)),\n            datetime(2009, 5, 9, 16, 14),\n        )\n        self.assertEqual(\n            self.sb._from_python(datetime(2009, 5, 9, 0, 0)), datetime(2009, 5, 9, 0, 0)\n        )\n        self.assertEqual(\n            self.sb._from_python(datetime(1899, 5, 18, 0, 0)),\n            datetime(1899, 5, 18, 0, 0),\n        )\n        self.assertEqual(\n            self.sb._from_python(datetime(2009, 5, 18, 1, 16, 30, 250)),\n            datetime(2009, 5, 18, 1, 16, 30, 250),\n        )\n\n    def test__to_python(self):\n        self.assertEqual(self.sb._to_python(\"abc\"), \"abc\")\n        self.assertEqual(self.sb._to_python(\"1\"), 1)\n        self.assertEqual(self.sb._to_python(\"2653\"), 2653)\n        self.assertEqual(self.sb._to_python(\"25.5\"), 25.5)\n        self.assertEqual(self.sb._to_python(\"[1, 2, 3]\"), [1, 2, 3])\n        
self.assertEqual(\n            self.sb._to_python('{\"a\": 1, \"b\": 2, \"c\": 3}'), {\"a\": 1, \"c\": 3, \"b\": 2}\n        )\n        self.assertEqual(\n            self.sb._to_python(\"2009-05-09T16:14:00\"), datetime(2009, 5, 9, 16, 14)\n        )\n        self.assertEqual(\n            self.sb._to_python(\"2009-05-09T00:00:00\"), datetime(2009, 5, 9, 0, 0)\n        )\n        self.assertEqual(self.sb._to_python(None), None)\n\n    def test_range_queries(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        self.assertEqual(len(self.whoosh_search(\"[d TO]\")), 23)\n        self.assertEqual(len(self.whoosh_search(\"name:[d TO]\")), 23)\n        self.assertEqual(len(self.whoosh_search(\"Ind* AND name:[d to]\")), 23)\n        self.assertEqual(len(self.whoosh_search(\"Ind* AND name:[to c]\")), 0)\n\n    def test_date_queries(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        self.assertEqual(len(self.whoosh_search(\"pub_date:20090717003000\")), 1)\n        self.assertEqual(len(self.whoosh_search(\"pub_date:20090717000000\")), 0)\n        self.assertEqual(\n            len(self.whoosh_search(\"Ind* AND pub_date:[to 20090717003000]\")), 3\n        )\n\n    def test_escaped_characters_queries(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        self.assertEqual(len(self.whoosh_search(\"Indexed\\!\")), 23)\n        self.assertEqual(len(self.whoosh_search(\"http\\:\\/\\/www\\.example\\.com\")), 0)\n\n    def test_build_schema(self):\n        ui = UnifiedIndex()\n        ui.build(indexes=[AllTypesWhooshMockSearchIndex()])\n\n        (content_field_name, schema) = self.sb.build_schema(ui.all_searchfields())\n        self.assertEqual(content_field_name, \"text\")\n\n        schema_names = set(schema.names())\n        required_schema = {\n            \"django_ct\",\n            \"django_id\",\n            \"id\",\n            \"is_active\",\n            \"name\",\n            \"pub_date\",\n            \"seen_count\",\n 
           \"sites\",\n            \"text\",\n        }\n        self.assertTrue(required_schema.issubset(schema_names))\n\n        self.assertIsInstance(schema._fields[\"text\"], TEXT)\n        self.assertIsInstance(schema._fields[\"pub_date\"], DATETIME)\n        self.assertIsInstance(schema._fields[\"seen_count\"], NUMERIC)\n        self.assertIsInstance(schema._fields[\"sites\"], KEYWORD)\n        self.assertIsInstance(schema._fields[\"is_active\"], BOOLEAN)\n\n    def test_verify_type(self):\n        old_ui = connections[\"whoosh\"].get_unified_index()\n        ui = UnifiedIndex()\n        wmtmmi = WhooshMaintainTypeMockSearchIndex()\n        ui.build(indexes=[wmtmmi])\n        connections[\"whoosh\"]._index = ui\n        sb = connections[\"whoosh\"].get_backend()\n        sb.setup()\n        sb.update(wmtmmi, self.sample_objs)\n\n        self.assertEqual(sb.search(\"*\")[\"hits\"], 23)\n        self.assertEqual(\n            [result.month for result in sb.search(\"*\")[\"results\"]],\n            [\n                \"06\",\n                \"07\",\n                \"06\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n                \"07\",\n            ],\n        )\n        connections[\"whoosh\"]._index = old_ui\n\n    @unittest.skipIf(\n        settings.HAYSTACK_CONNECTIONS[\"whoosh\"].get(\"STORAGE\") != \"file\",\n        \"testing writability requires Whoosh to use STORAGE=file\",\n    )\n    def test_writable(self):\n        if not os.path.exists(settings.HAYSTACK_CONNECTIONS[\"whoosh\"][\"PATH\"]):\n            
os.makedirs(settings.HAYSTACK_CONNECTIONS[\"whoosh\"][\"PATH\"])\n\n        os.chmod(settings.HAYSTACK_CONNECTIONS[\"whoosh\"][\"PATH\"], 0o400)\n\n        try:\n            self.sb.setup()\n            self.fail()\n        except IOError:\n            # Yay. We failed\n            pass\n\n        os.chmod(settings.HAYSTACK_CONNECTIONS[\"whoosh\"][\"PATH\"], 0o755)\n\n    def test_slicing(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        page_1 = self.sb.search(\"*\", start_offset=0, end_offset=20)\n        page_2 = self.sb.search(\"*\", start_offset=20, end_offset=30)\n        self.assertEqual(len(page_1[\"results\"]), 20)\n        self.assertEqual(\n            [result.pk for result in page_1[\"results\"]],\n            [\"%s\" % i for i in range(1, 21)],\n        )\n        self.assertEqual(len(page_2[\"results\"]), 3)\n        self.assertEqual(\n            [result.pk for result in page_2[\"results\"]], [\"21\", \"22\", \"23\"]\n        )\n\n        # This used to throw an error.\n        page_0 = self.sb.search(\"*\", start_offset=0, end_offset=0)\n        self.assertEqual(len(page_0[\"results\"]), 1)\n\n    @unittest.expectedFailure\n    def test_scoring(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        page_1 = self.sb.search(\"index\", start_offset=0, end_offset=20)\n        page_2 = self.sb.search(\"index\", start_offset=20, end_offset=30)\n        self.assertEqual(len(page_1[\"results\"]), 20)\n        self.assertEqual(\n            [\"%0.2f\" % result.score for result in page_1[\"results\"]],\n            [\n                \"0.51\",\n                \"0.51\",\n                \"0.51\",\n                \"0.51\",\n                \"0.51\",\n                \"0.51\",\n                \"0.51\",\n                \"0.51\",\n                \"0.51\",\n                \"0.40\",\n                \"0.40\",\n                \"0.40\",\n                \"0.40\",\n                \"0.40\",\n                \"0.40\",\n        
        \"0.40\",\n                \"0.40\",\n                \"0.40\",\n                \"0.40\",\n                \"0.40\",\n            ],\n        )\n        self.assertEqual(len(page_2[\"results\"]), 3)\n        self.assertEqual(\n            [\"%0.2f\" % result.score for result in page_2[\"results\"]],\n            [\"0.40\", \"0.40\", \"0.40\"],\n        )\n\n    def test_analyzed_fields(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        results = self.whoosh_search(\"name_analyzed:1234daniel5678\")\n        self.assertEqual(len(results), 23)\n\n\nclass WhooshBoostBackendTestCase(WhooshTestCase):\n    def setUp(self):\n        super().setUp()\n\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wmmi = WhooshBoostMockSearchIndex()\n        self.ui.build(indexes=[self.wmmi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        self.sb.setup()\n        self.raw_whoosh = self.sb.index\n        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)\n        self.sb.delete_index()\n        self.sample_objs = []\n\n        for i in range(1, 5):\n            mock = AFourthMockModel()\n            mock.id = i\n\n            if i % 2:\n                mock.author = \"daniel\"\n                mock.editor = \"david\"\n            else:\n                mock.author = \"david\"\n                mock.editor = \"daniel\"\n\n            mock.pub_date = date(2009, 2, 25) - timedelta(days=i)\n            self.sample_objs.append(mock)\n\n    def tearDown(self):\n        connections[\"whoosh\"]._index = self.ui\n        super().tearDown()\n\n    @unittest.expectedFailure\n    def test_boost(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.raw_whoosh = self.raw_whoosh.refresh()\n        searcher = self.raw_whoosh.searcher()\n        
self.assertEqual(len(searcher.search(self.parser.parse(\"*\"), limit=1000)), 2)\n\n        results = SearchQuerySet(\"whoosh\").filter(\n            SQ(author=\"daniel\") | SQ(editor=\"daniel\")\n        )\n\n        self.assertEqual(\n            [result.id for result in results],\n            [\"core.afourthmockmodel.1\", \"core.afourthmockmodel.3\"],\n        )\n        self.assertEqual(results[0].boost, 1.1)\n\n\nclass LiveWhooshSearchQueryTestCase(WhooshTestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wmmi = WhooshMockSearchIndex()\n        self.wmtmmi = WhooshMaintainTypeMockSearchIndex()\n        self.ui.build(indexes=[self.wmmi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        self.sb.setup()\n        self.raw_whoosh = self.sb.index\n        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)\n        self.sb.delete_index()\n\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = date(2009, 2, 25) - timedelta(days=i)\n            self.sample_objs.append(mock)\n\n        self.sq = connections[\"whoosh\"].get_query()\n\n    def tearDown(self):\n        connections[\"whoosh\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_get_spelling(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        self.sq.add_filter(SQ(content=\"Indexe\"))\n        self.assertEqual(self.sq.get_spelling_suggestion(), \"indexed\")\n\n    def test_log_query(self):\n        from django.conf import settings\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n\n        # Stow.\n\n        with self.settings(DEBUG=False):\n            
len(self.sq.get_results())\n            self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n\n        with self.settings(DEBUG=True):\n            # Redefine it to clear out the cached results.\n            self.sq = connections[\"whoosh\"].get_query()\n            self.sq.add_filter(SQ(name=\"bar\"))\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"whoosh\"].queries), 1)\n            self.assertEqual(\n                connections[\"whoosh\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n\n            # And again, for good measure.\n            self.sq = connections[\"whoosh\"].get_query()\n            self.sq.add_filter(SQ(name=\"baz\"))\n            self.sq.add_filter(SQ(text=\"foo\"))\n            len(self.sq.get_results())\n            self.assertEqual(len(connections[\"whoosh\"].queries), 2)\n            self.assertEqual(\n                connections[\"whoosh\"].queries[0][\"query_string\"], \"name:(bar)\"\n            )\n            self.assertEqual(\n                connections[\"whoosh\"].queries[1][\"query_string\"],\n                \"(name:(baz) AND text:(foo))\",\n            )\n\n\n@override_settings(DEBUG=True)\nclass LiveWhooshSearchQuerySetTestCase(WhooshTestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wmmi = WhooshMockSearchIndex()\n        self.ui.build(indexes=[self.wmmi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        self.sb.setup()\n        self.raw_whoosh = self.sb.index\n        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)\n        self.sb.delete_index()\n\n        self.sample_objs = []\n\n        for i in range(1, 4):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            
mock.pub_date = date(2009, 2, 25) - timedelta(days=i)\n            self.sample_objs.append(mock)\n\n        self.sq = connections[\"whoosh\"].get_query()\n        self.sqs = SearchQuerySet(\"whoosh\")\n\n    def tearDown(self):\n        connections[\"whoosh\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_various_searchquerysets(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        sqs = self.sqs.filter(content=\"Index\")\n        self.assertEqual(sqs.query.build_query(), \"(Index)\")\n        self.assertEqual(len(sqs), 3)\n\n        sqs = self.sqs.auto_query(\"Indexed!\")\n        self.assertEqual(sqs.query.build_query(), \"('Indexed!')\")\n        self.assertEqual(len(sqs), 3)\n\n        sqs = self.sqs.auto_query(\"Indexed!\").filter(pub_date__lte=date(2009, 8, 31))\n        self.assertEqual(\n            sqs.query.build_query(), \"(('Indexed!') AND pub_date:([to 20090831000000]))\"\n        )\n        self.assertEqual(len(sqs), 3)\n\n        sqs = self.sqs.auto_query(\"Indexed!\").filter(pub_date__lte=date(2009, 2, 23))\n        self.assertEqual(\n            sqs.query.build_query(), \"(('Indexed!') AND pub_date:([to 20090223000000]))\"\n        )\n        self.assertEqual(len(sqs), 2)\n\n        sqs = (\n            self.sqs.auto_query(\"Indexed!\")\n            .filter(pub_date__lte=date(2009, 2, 25))\n            .filter(django_id__in=[1, 2])\n            .exclude(name=\"daniel1\")\n        )\n        self.assertEqual(\n            sqs.query.build_query(),\n            \"(('Indexed!') AND pub_date:([to 20090225000000]) AND django_id:(1 OR 2) AND NOT (name:(daniel1)))\",\n        )\n        self.assertEqual(len(sqs), 1)\n\n        sqs = self.sqs.auto_query(\"re-inker\")\n        self.assertEqual(sqs.query.build_query(), \"('re-inker')\")\n        self.assertEqual(len(sqs), 0)\n\n        sqs = self.sqs.auto_query(\"0.7 wire\")\n        self.assertEqual(sqs.query.build_query(), \"('0.7' wire)\")\n        
self.assertEqual(len(sqs), 0)\n\n        sqs = self.sqs.auto_query(\"daler-rowney pearlescent 'bell bronze'\")\n        self.assertEqual(\n            sqs.query.build_query(), \"('daler-rowney' pearlescent 'bell bronze')\"\n        )\n        self.assertEqual(len(sqs), 0)\n\n        sqs = self.sqs.models(MockModel)\n        self.assertEqual(sqs.query.build_query(), \"*\")\n        self.assertEqual(len(sqs), 3)\n\n    def test_all_regression(self):\n        sqs = SearchQuerySet(\"whoosh\")\n        self.assertEqual([result.pk for result in sqs], [])\n\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertTrue(self.sb.index.doc_count() > 0)\n\n        sqs = SearchQuerySet(\"whoosh\")\n        self.assertEqual(len(sqs), 3)\n        self.assertEqual(sorted([result.pk for result in sqs]), [\"1\", \"2\", \"3\"])\n\n        try:\n            sqs = repr(SearchQuerySet(\"whoosh\"))\n        except:\n            self.fail()\n\n    def test_regression_space_query(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        self.assertTrue(self.sb.index.doc_count() > 0)\n\n        sqs = SearchQuerySet(\"whoosh\").auto_query(\" \")\n        self.assertEqual(len(sqs), 3)\n        sqs = SearchQuerySet(\"whoosh\").filter(content=\" \")\n        self.assertEqual(len(sqs), 0)\n\n    def test_iter(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n        sqs = self.sqs.auto_query(\"Indexed!\")\n        results = [int(result.pk) for result in iter(sqs)]\n        self.assertEqual(sorted(results), [1, 2, 3])\n        self.assertEqual(len(connections[\"whoosh\"].queries), 1)\n\n    def test_slice(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n        results = self.sqs.auto_query(\"Indexed!\")\n        
self.assertEqual(sorted([int(result.pk) for result in results[1:3]]), [1, 2])\n        self.assertEqual(len(connections[\"whoosh\"].queries), 1)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n        results = self.sqs.auto_query(\"Indexed!\")\n        self.assertEqual(int(results[0].pk), 1)\n        self.assertEqual(len(connections[\"whoosh\"].queries), 1)\n\n    def test_values_slicing(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n\n        # TODO: this would be a good candidate for refactoring into a TestCase subclass shared across backends\n\n        # The values will come back as strings because Hasytack doesn't assume PKs are integers.\n        # We'll prepare this set once since we're going to query the same results in multiple ways:\n        expected_pks = [\"3\", \"2\", \"1\"]\n\n        results = self.sqs.all().order_by(\"pub_date\").values(\"pk\")\n        self.assertListEqual([i[\"pk\"] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\")\n        self.assertListEqual([i[0] for i in results[1:11]], expected_pks)\n\n        results = self.sqs.all().order_by(\"pub_date\").values_list(\"pk\", flat=True)\n        self.assertListEqual(results[1:11], expected_pks)\n\n        self.assertEqual(len(connections[\"whoosh\"].queries), 3)\n\n    def test_manual_iter(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n        results = self.sqs.auto_query(\"Indexed!\")\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n        results = [int(result.pk) for result in results._manual_iter()]\n        self.assertEqual(sorted(results), [1, 2, 3])\n        self.assertEqual(len(connections[\"whoosh\"].queries), 1)\n\n    def test_fill_cache(self):\n        self.sb.update(self.wmmi, 
self.sample_objs)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n        results = self.sqs.auto_query(\"Indexed!\")\n        self.assertEqual(len(results._result_cache), 0)\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n        results._fill_cache(0, 10)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 3\n        )\n        self.assertEqual(len(connections[\"whoosh\"].queries), 1)\n        results._fill_cache(10, 20)\n        self.assertEqual(\n            len([result for result in results._result_cache if result is not None]), 3\n        )\n        self.assertEqual(len(connections[\"whoosh\"].queries), 2)\n\n    def test_cache_is_full(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n        self.assertEqual(self.sqs._cache_is_full(), False)\n        results = self.sqs.auto_query(\"Indexed!\")\n        result_list = [i for i in iter(results)]\n        self.assertEqual(results._cache_is_full(), True)\n        self.assertEqual(len(connections[\"whoosh\"].queries), 1)\n\n    def test_count(self):\n        more_samples = []\n\n        for i in range(1, 50):\n            mock = MockModel()\n            mock.id = i\n            mock.author = \"daniel%s\" % i\n            mock.pub_date = date(2009, 2, 25) - timedelta(days=i)\n            more_samples.append(mock)\n\n        self.sb.update(self.wmmi, more_samples)\n\n        reset_search_queries()\n        self.assertEqual(len(connections[\"whoosh\"].queries), 0)\n        results = self.sqs.all()\n        self.assertEqual(len(results), 49)\n        self.assertEqual(results._cache_is_full(), False)\n        self.assertEqual(len(connections[\"whoosh\"].queries), 1)\n\n    def test_query_generation(self):\n        sqs = self.sqs.filter(\n            
SQ(content=AutoQuery(\"hello world\")) | SQ(title=AutoQuery(\"hello world\"))\n        )\n        self.assertEqual(\n            sqs.query.build_query(), \"((hello world) OR title:(hello world))\"\n        )\n\n    def test_result_class(self):\n        self.sb.update(self.wmmi, self.sample_objs)\n\n        # Assert that we're defaulting to ``SearchResult``.\n        sqs = self.sqs.all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n        # Custom class.\n        sqs = self.sqs.result_class(MockSearchResult).all()\n        self.assertTrue(isinstance(sqs[0], MockSearchResult))\n\n        # Reset to default.\n        sqs = self.sqs.result_class(None).all()\n        self.assertTrue(isinstance(sqs[0], SearchResult))\n\n\nclass LiveWhooshMultiSearchQuerySetTestCase(WhooshTestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wmmi = WhooshMockSearchIndex()\n        self.wamsi = WhooshAnotherMockSearchIndex()\n        self.ui.build(indexes=[self.wmmi, self.wamsi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        self.sb.setup()\n        self.raw_whoosh = self.sb.index\n        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)\n        self.sb.delete_index()\n\n        self.wmmi.update(using=\"whoosh\")\n        self.wamsi.update(using=\"whoosh\")\n\n        self.sqs = SearchQuerySet(\"whoosh\")\n\n    def tearDown(self):\n        connections[\"whoosh\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_searchquerysets_with_models(self):\n        sqs = self.sqs.all()\n        self.assertEqual(sqs.query.build_query(), \"*\")\n        self.assertEqual(len(sqs), 25)\n\n        sqs = self.sqs.models(MockModel)\n        self.assertEqual(sqs.query.build_query(), \"*\")\n        
self.assertEqual(len(sqs), 23)\n\n        sqs = self.sqs.models(AnotherMockModel)\n        self.assertEqual(sqs.query.build_query(), \"*\")\n        self.assertEqual(len(sqs), 2)\n\n\nclass LiveWhooshMoreLikeThisTestCase(WhooshTestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wmmi = WhooshMockSearchIndex()\n        self.wamsi = WhooshAnotherMockSearchIndex()\n        self.ui.build(indexes=[self.wmmi, self.wamsi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        self.sb.setup()\n        self.raw_whoosh = self.sb.index\n        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)\n        self.sb.delete_index()\n\n        self.wmmi.update()\n        self.wamsi.update()\n\n        self.sqs = SearchQuerySet(\"whoosh\")\n\n    def tearDown(self):\n        connections[\"whoosh\"]._index = self.old_ui\n        super().tearDown()\n\n    # We expect failure here because, despite not changing the code, Whoosh\n    # 2.5.1 returns incorrect counts/results. 
Huzzah.\n    @unittest.expectedFailure\n    def test_more_like_this(self):\n        mlt = self.sqs.more_like_this(MockModel.objects.get(pk=22))\n        self.assertEqual(mlt.count(), 22)\n        self.assertEqual(\n            sorted([result.pk for result in mlt]),\n            sorted(\n                [\n                    \"9\",\n                    \"8\",\n                    \"7\",\n                    \"6\",\n                    \"5\",\n                    \"4\",\n                    \"3\",\n                    \"2\",\n                    \"1\",\n                    \"21\",\n                    \"20\",\n                    \"19\",\n                    \"18\",\n                    \"17\",\n                    \"16\",\n                    \"15\",\n                    \"14\",\n                    \"13\",\n                    \"12\",\n                    \"11\",\n                    \"10\",\n                    \"23\",\n                ]\n            ),\n        )\n        self.assertEqual(len([result.pk for result in mlt]), 22)\n\n        alt_mlt = self.sqs.filter(name=\"daniel3\").more_like_this(\n            MockModel.objects.get(pk=13)\n        )\n        self.assertEqual(alt_mlt.count(), 8)\n        self.assertEqual(\n            sorted([result.pk for result in alt_mlt]),\n            sorted([\"4\", \"3\", \"22\", \"19\", \"17\", \"16\", \"10\", \"23\"]),\n        )\n        self.assertEqual(len([result.pk for result in alt_mlt]), 8)\n\n        alt_mlt_with_models = self.sqs.models(MockModel).more_like_this(\n            MockModel.objects.get(pk=11)\n        )\n        self.assertEqual(alt_mlt_with_models.count(), 22)\n        self.assertEqual(\n            sorted([result.pk for result in alt_mlt_with_models]),\n            sorted(\n                [\n                    \"9\",\n                    \"8\",\n                    \"7\",\n                    \"6\",\n                    \"5\",\n                    \"4\",\n                    \"3\",\n               
     \"2\",\n                    \"1\",\n                    \"22\",\n                    \"21\",\n                    \"20\",\n                    \"19\",\n                    \"18\",\n                    \"17\",\n                    \"16\",\n                    \"15\",\n                    \"14\",\n                    \"13\",\n                    \"12\",\n                    \"10\",\n                    \"23\",\n                ]\n            ),\n        )\n        self.assertEqual(len([result.pk for result in alt_mlt_with_models]), 22)\n\n        if hasattr(MockModel.objects, \"defer\"):\n            # Make sure MLT works with deferred bits.\n            mi = MockModel.objects.defer(\"foo\").get(pk=22)\n            deferred = self.sqs.models(MockModel).more_like_this(mi)\n            self.assertEqual(deferred.count(), 22)\n            self.assertEqual(\n                sorted([result.pk for result in deferred]),\n                sorted(\n                    [\n                        \"9\",\n                        \"8\",\n                        \"7\",\n                        \"6\",\n                        \"5\",\n                        \"4\",\n                        \"3\",\n                        \"2\",\n                        \"1\",\n                        \"21\",\n                        \"20\",\n                        \"19\",\n                        \"18\",\n                        \"17\",\n                        \"16\",\n                        \"15\",\n                        \"14\",\n                        \"13\",\n                        \"12\",\n                        \"11\",\n                        \"10\",\n                        \"23\",\n                    ]\n                ),\n            )\n            self.assertEqual(len([result.pk for result in deferred]), 22)\n\n        # Ensure that swapping the ``result_class`` works.\n        self.assertTrue(\n            isinstance(\n                
self.sqs.result_class(MockSearchResult).more_like_this(\n                    MockModel.objects.get(pk=21)\n                )[0],\n                MockSearchResult,\n            )\n        )\n\n\n@override_settings(DEBUG=True)\nclass LiveWhooshAutocompleteTestCase(WhooshTestCase):\n    fixtures = [\"bulk_data.json\"]\n\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wacsi = WhooshAutocompleteMockModelSearchIndex()\n        self.ui.build(indexes=[self.wacsi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        # Stow.\n        import haystack\n\n        self.sb.setup()\n        self.sqs = SearchQuerySet(\"whoosh\")\n\n        # Wipe it clean.\n        self.sqs.query.backend.clear()\n\n        self.wacsi.update(using=\"whoosh\")\n\n    def tearDown(self):\n        connections[\"whoosh\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_autocomplete(self):\n        autocomplete = self.sqs.autocomplete(text_auto=\"mod\")\n        self.assertEqual(autocomplete.count(), 5)\n        self.assertEqual(\n            [result.pk for result in autocomplete], [\"1\", \"12\", \"6\", \"7\", \"14\"]\n        )\n        self.assertTrue(\"mod\" in autocomplete[0].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[1].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[2].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[3].text.lower())\n        self.assertTrue(\"mod\" in autocomplete[4].text.lower())\n        self.assertEqual(len([result.pk for result in autocomplete]), 5)\n\n    def test_edgengram_regression(self):\n        autocomplete = self.sqs.autocomplete(text_auto=\"ngm\")\n        self.assertEqual(autocomplete.count(), 0)\n\n    def test_extra_whitespace(self):\n        autocomplete = self.sqs.autocomplete(text_auto=\"mod \")\n    
    self.assertEqual(autocomplete.count(), 5)\n\n\nclass WhooshRoundTripSearchIndex(indexes.SearchIndex, indexes.Indexable):\n    text = indexes.CharField(document=True, default=\"\")\n    name = indexes.CharField()\n    is_active = indexes.BooleanField()\n    post_count = indexes.IntegerField()\n    average_rating = indexes.FloatField()\n    price = indexes.DecimalField()\n    pub_date = indexes.DateField()\n    created = indexes.DateTimeField()\n    tags = indexes.MultiValueField()\n    sites = indexes.MultiValueField()\n    # For a regression involving lists with nothing in them.\n    empty_list = indexes.MultiValueField()\n\n    def get_model(self):\n        return MockModel\n\n    def prepare(self, obj):\n        prepped = super().prepare(obj)\n        prepped.update(\n            {\n                \"text\": \"This is some example text.\",\n                \"name\": \"Mister Pants\",\n                \"is_active\": True,\n                \"post_count\": 25,\n                \"average_rating\": 3.6,\n                \"price\": Decimal(\"24.99\"),\n                \"pub_date\": date(2009, 11, 21),\n                \"created\": datetime(2009, 11, 21, 21, 31, 00),\n                \"tags\": [\"staff\", \"outdoor\", \"activist\", \"scientist\"],\n                \"sites\": [3, 5, 1],\n                \"empty_list\": [],\n            }\n        )\n        return prepped\n\n\n@override_settings(DEBUG=True)\nclass LiveWhooshRoundTripTestCase(WhooshTestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wrtsi = WhooshRoundTripSearchIndex()\n        self.ui.build(indexes=[self.wrtsi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        self.sb.setup()\n        self.raw_whoosh = self.sb.index\n        self.parser = QueryParser(self.sb.content_field_name, 
schema=self.sb.schema)\n        self.sb.delete_index()\n\n        self.sqs = SearchQuerySet(\"whoosh\")\n\n        # Wipe it clean.\n        self.sqs.query.backend.clear()\n\n        # Fake indexing.\n        mock = MockModel()\n        mock.id = 1\n        self.sb.update(self.wrtsi, [mock])\n\n    def tearDown(self):\n        super().tearDown()\n\n    def test_round_trip(self):\n        results = self.sqs.filter(id=\"core.mockmodel.1\")\n\n        # Sanity check.\n        self.assertEqual(results.count(), 1)\n\n        # Check the individual fields.\n        result = results[0]\n        self.assertEqual(result.id, \"core.mockmodel.1\")\n        self.assertEqual(result.text, \"This is some example text.\")\n        self.assertEqual(result.name, \"Mister Pants\")\n        self.assertEqual(result.is_active, True)\n        self.assertEqual(result.post_count, 25)\n        self.assertEqual(result.average_rating, 3.6)\n        self.assertEqual(result.price, \"24.99\")\n        self.assertEqual(result.pub_date, datetime(2009, 11, 21, 0, 0))\n        self.assertEqual(result.created, datetime(2009, 11, 21, 21, 31, 00))\n        self.assertEqual(result.tags, [\"staff\", \"outdoor\", \"activist\", \"scientist\"])\n        self.assertEqual(result.sites, [\"3\", \"5\", \"1\"])\n        self.assertEqual(result.empty_list, [])\n\n        # Check boolean filtering...\n        results = self.sqs.filter(id=\"core.mockmodel.1\", is_active=True)\n        self.assertEqual(results.count(), 1)\n\n\n@override_settings(DEBUG=True)\nclass LiveWhooshRamStorageTestCase(TestCase):\n    def setUp(self):\n        super().setUp()\n\n        # Stow.\n        self.old_whoosh_storage = settings.HAYSTACK_CONNECTIONS[\"whoosh\"].get(\n            \"STORAGE\", \"file\"\n        )\n        settings.HAYSTACK_CONNECTIONS[\"whoosh\"][\"STORAGE\"] = \"ram\"\n\n        self.old_ui = connections[\"whoosh\"].get_unified_index()\n        self.ui = UnifiedIndex()\n        self.wrtsi = 
WhooshRoundTripSearchIndex()\n        self.ui.build(indexes=[self.wrtsi])\n        self.sb = connections[\"whoosh\"].get_backend()\n        connections[\"whoosh\"]._index = self.ui\n\n        # Stow.\n        import haystack\n\n        self.sb.setup()\n        self.raw_whoosh = self.sb.index\n        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)\n\n        self.sqs = SearchQuerySet(\"whoosh\")\n\n        # Wipe it clean.\n        self.sqs.query.backend.clear()\n\n        # Fake indexing.\n        mock = MockModel()\n        mock.id = 1\n        self.sb.update(self.wrtsi, [mock])\n\n    def tearDown(self):\n        self.sqs.query.backend.clear()\n\n        settings.HAYSTACK_CONNECTIONS[\"whoosh\"][\"STORAGE\"] = self.old_whoosh_storage\n        connections[\"whoosh\"]._index = self.old_ui\n        super().tearDown()\n\n    def test_ram_storage(self):\n        results = self.sqs.filter(id=\"core.mockmodel.1\")\n\n        # Sanity check.\n        self.assertEqual(results.count(), 1)\n\n        # Check the individual fields.\n        result = results[0]\n        self.assertEqual(result.id, \"core.mockmodel.1\")\n        self.assertEqual(result.text, \"This is some example text.\")\n        self.assertEqual(result.name, \"Mister Pants\")\n        self.assertEqual(result.is_active, True)\n        self.assertEqual(result.post_count, 25)\n        self.assertEqual(result.average_rating, 3.6)\n        self.assertEqual(result.pub_date, datetime(2009, 11, 21, 0, 0))\n        self.assertEqual(result.created, datetime(2009, 11, 21, 21, 31, 00))\n        self.assertEqual(result.tags, [\"staff\", \"outdoor\", \"activist\", \"scientist\"])\n        self.assertEqual(result.sites, [\"3\", \"5\", \"1\"])\n        self.assertEqual(result.empty_list, [])\n"
  },
  {
    "path": "test_haystack/whoosh_tests/test_whoosh_query.py",
    "content": "import datetime\n\nfrom haystack import connections\nfrom haystack.inputs import Exact\nfrom haystack.models import SearchResult\nfrom haystack.query import SQ, SearchQuerySet\n\nfrom ..core.models import AnotherMockModel, MockModel\nfrom .testcases import WhooshTestCase\n\n\nclass WhooshSearchQueryTestCase(WhooshTestCase):\n    def setUp(self):\n        super().setUp()\n\n        self.sq = connections[\"whoosh\"].get_query()\n\n    def test_build_query_all(self):\n        self.assertEqual(self.sq.build_query(), \"*\")\n\n    def test_build_query_single_word(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_build_query_multiple_words_and(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_filter(SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"((hello) AND (world))\")\n\n    def test_build_query_multiple_words_not(self):\n        self.sq.add_filter(~SQ(content=\"hello\"))\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"(NOT ((hello)) AND NOT ((world)))\")\n\n    def test_build_query_multiple_words_or(self):\n        self.sq.add_filter(SQ(content=\"hello\") | SQ(content=\"world\"))\n        self.assertEqual(self.sq.build_query(), \"((hello) OR (world))\")\n\n    def test_build_query_multiple_words_mixed(self):\n        self.sq.add_filter(SQ(content=\"why\") | SQ(content=\"hello\"))\n        self.sq.add_filter(~SQ(content=\"world\"))\n        self.assertEqual(\n            self.sq.build_query(), \"(((why) OR (hello)) AND NOT ((world)))\"\n        )\n\n    def test_build_query_phrase(self):\n        self.sq.add_filter(SQ(content=\"hello world\"))\n        self.assertEqual(self.sq.build_query(), \"(hello AND world)\")\n\n        self.sq.add_filter(SQ(content__exact=\"hello world\"))\n        self.assertEqual(\n            self.sq.build_query(), '((hello AND world) 
AND (\"hello world\"))'\n        )\n\n    def test_build_query_boost(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_boost(\"world\", 5)\n        self.assertEqual(self.sq.build_query(), \"(hello) world^5\")\n\n    def test_correct_exact(self):\n        self.sq.add_filter(SQ(content=Exact(\"hello world\")))\n        self.assertEqual(self.sq.build_query(), '(\"hello world\")')\n\n    def test_build_query_multiple_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59)))\n        self.sq.add_filter(SQ(author__gt=\"daniel\"))\n        self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13)))\n        self.sq.add_filter(SQ(title__gte=\"B\"))\n        self.sq.add_filter(SQ(id__in=[1, 2, 3]))\n        self.sq.add_filter(SQ(rating__range=[3, 5]))\n        self.assertEqual(\n            self.sq.build_query(),\n            \"((why) AND pub_date:([to 20090210015900]) AND author:({daniel to}) AND created:({to 20090212121300}) AND title:([B to]) AND id:(1 OR 2 OR 3) AND rating:([3 to 5]))\",\n        )\n\n    def test_build_query_in_filter_multiple_words(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=[\"A Famous Paper\", \"An Infamous Article\"]))\n        self.assertEqual(\n            self.sq.build_query(),\n            '((why) AND title:(\"A Famous Paper\" OR \"An Infamous Article\"))',\n        )\n\n    def test_build_query_in_filter_datetime(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))\n        self.assertEqual(self.sq.build_query(), \"((why) AND pub_date:(20090706015621))\")\n\n    def test_build_query_in_with_set(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=set([\"A Famous Paper\", \"An Infamous Article\"])))\n        query = 
self.sq.build_query()\n        self.assertTrue(\"(why)\" in query)\n\n        # Because ordering in Py3 is now random.\n        if 'title:(\"A ' in query:\n            self.assertTrue(\n                'title:(\"A Famous Paper\" OR \"An Infamous Article\")' in query\n            )\n        else:\n            self.assertTrue(\n                'title:(\"An Infamous Article\" OR \"A Famous Paper\")' in query\n            )\n\n    def test_build_query_wildcard_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__startswith=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack*))\")\n\n    def test_build_query_fuzzy_filter_types(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__fuzzy=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(haystack~2/3))\")\n\n    def test_build_query_with_contains(self):\n        self.sq.add_filter(SQ(content=\"circular\"))\n        self.sq.add_filter(SQ(title__contains=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((circular) AND title:(*haystack*))\")\n\n    def test_build_query_with_endswith(self):\n        self.sq.add_filter(SQ(content=\"circular\"))\n        self.sq.add_filter(SQ(title__endswith=\"haystack\"))\n        self.assertEqual(self.sq.build_query(), \"((circular) AND title:(*haystack))\")\n\n    def test_clean(self):\n        self.assertEqual(self.sq.clean(\"hello world\"), \"hello world\")\n        self.assertEqual(self.sq.clean(\"hello AND world\"), \"hello and world\")\n        self.assertEqual(\n            self.sq.clean(\n                'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ \" ~ * ? : \\ world'\n            ),\n            \"hello and or not to '+' '-' '&&' '||' '!' '(' ')' '{' '}' '[' ']' '^' '\\\"' '~' '*' '?' 
':' '\\\\' world\",\n        )\n        self.assertEqual(\n            self.sq.clean(\"so please NOTe i am in a bAND and bORed\"),\n            \"so please NOTe i am in a bAND and bORed\",\n        )\n\n    def test_build_query_with_models(self):\n        self.sq.add_filter(SQ(content=\"hello\"))\n        self.sq.add_model(MockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n        self.sq.add_model(AnotherMockModel)\n        self.assertEqual(self.sq.build_query(), \"(hello)\")\n\n    def test_build_query_with_datetime(self):\n        self.sq.add_filter(SQ(pub_date=datetime.datetime(2009, 5, 9, 16, 20)))\n        self.assertEqual(self.sq.build_query(), \"pub_date:(20090509162000)\")\n\n    def test_build_query_with_sequence_and_filter_not_in(self):\n        self.sq.add_filter(SQ(id=[1, 2, 3]))\n        self.assertEqual(self.sq.build_query(), \"id:(1,2,3)\")\n\n    def test_set_result_class(self):\n        # Assert that we're defaulting to ``SearchResult``.\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n        # Custom class.\n        class IttyBittyResult(object):\n            pass\n\n        self.sq.set_result_class(IttyBittyResult)\n        self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))\n\n        # Reset to default.\n        self.sq.set_result_class(None)\n        self.assertTrue(issubclass(self.sq.result_class, SearchResult))\n\n    def test_in_filter_values_list(self):\n        self.sq.add_filter(SQ(content=\"why\"))\n        self.sq.add_filter(SQ(title__in=MockModel.objects.values_list(\"id\", flat=True)))\n        self.assertEqual(self.sq.build_query(), \"((why) AND title:(1 OR 2 OR 3))\")\n\n    def test_narrow_sq(self):\n        sqs = SearchQuerySet(using=\"whoosh\").narrow(SQ(foo=\"moof\"))\n        self.assertTrue(isinstance(sqs, SearchQuerySet))\n        self.assertEqual(len(sqs.query.narrow_queries), 1)\n        self.assertEqual(sqs.query.narrow_queries.pop(), \"foo:(moof)\")\n"
  },
  {
    "path": "test_haystack/whoosh_tests/testcases.py",
    "content": "import os\nimport shutil\n\nfrom django.conf import settings\nfrom django.test import TestCase\n\n\nclass WhooshTestCase(TestCase):\n    fixtures = [\"base_data\"]\n\n    @classmethod\n    def setUpClass(cls):\n        for name, conn_settings in settings.HAYSTACK_CONNECTIONS.items():\n            if (\n                conn_settings[\"ENGINE\"]\n                != \"haystack.backends.whoosh_backend.WhooshEngine\"\n            ):\n                continue\n\n            if \"STORAGE\" in conn_settings and conn_settings[\"STORAGE\"] != \"file\":\n                continue\n\n            # Start clean\n            if os.path.exists(conn_settings[\"PATH\"]):\n                shutil.rmtree(conn_settings[\"PATH\"])\n\n            from haystack import connections\n\n            connections[name].get_backend().setup()\n\n        super(WhooshTestCase, cls).setUpClass()\n\n    @classmethod\n    def tearDownClass(cls):\n        for conn in settings.HAYSTACK_CONNECTIONS.values():\n            if conn[\"ENGINE\"] != \"haystack.backends.whoosh_backend.WhooshEngine\":\n                continue\n\n            if \"STORAGE\" in conn and conn[\"STORAGE\"] != \"file\":\n                continue\n\n            # Start clean\n            if os.path.exists(conn[\"PATH\"]):\n                shutil.rmtree(conn[\"PATH\"])\n\n        super(WhooshTestCase, cls).tearDownClass()\n"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\nenvlist =\n    docs\n    py35-django2.2-es{1.x,2.x,5.x}\n    py{36,37,38,py}-django{2.2,3.0}-es{1.x,2.x,5.x}\n\n\n[testenv]\ncommands =\n    python test_haystack/solr_tests/server/wait-for-solr\n    python {toxinidir}/setup.py test\ndeps =\n    requests\n    django2.2: Django>=2.2,<3.0\n    django3.0: Django>=3.0,<3.1\n    es1.x: elasticsearch>=1,<2\n    es2.x: elasticsearch>=2,<3\n    es5.x: elasticsearch>=5,<6\nsetenv =\n    es1.x: VERSION_ES=>=1,<2\n    es2.x: VERSION_ES=>=2,<3\n    es5.x: VERSION_ES=>=5,<6\n\n\n[testenv:docs]\nchangedir = docs\ndeps =\n    sphinx\ncommands =\n    sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html\n"
  }
]