[
  {
    "path": ".gitattributes",
    "content": "kanren/_version.py export-subst\n"
  },
  {
    "path": ".github/FUNDING.yml",
    "content": "github: [brandonwillard]\n"
  },
  {
    "path": ".github/workflows/pypi.yml",
    "content": "name: PyPI\non:\n  push:\n    branches:\n      - main\n      - auto-release\n  pull_request:\n    branches: [main]\n  release:\n    types: [published]\n\n# Cancels all previous workflow runs for pull requests that have not completed.\nconcurrency:\n  # The concurrency group contains the workflow name and the branch name for pull requests\n  # or the commit hash for any other events.\n  group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}\n  cancel-in-progress: true\n\njobs:\n  build:\n    name: Build distributions\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n      - uses: actions/setup-python@v5\n        with:\n          python-version: \"3.10\"\n      - name: Build distributions\n        run: |\n          pip install build\n          python -m build\n      - name: Check the sdist installs and imports\n        run: |\n          mkdir -p test-sdist\n          cd test-sdist\n          python -m venv venv-sdist\n          venv-sdist/bin/python -m pip install ../dist/minikanren-*.tar.gz\n      - name: Check the wheel installs and imports\n        run: |\n          mkdir -p test-wheel\n          cd test-wheel\n          python -m venv venv-wheel\n          venv-wheel/bin/python -m pip install ../dist/minikanren-*.whl\n      - uses: actions/upload-artifact@v4\n        with:\n          name: artifact\n          path: dist/*\n\n  upload_pypi:\n    name: Upload to PyPI on release\n    needs: [build]\n    runs-on: ubuntu-latest\n    if: github.event_name == 'release' && github.event.action == 'published'\n    steps:\n      - uses: actions/download-artifact@v4\n        with:\n          name: artifact\n          path: dist\n      - uses: pypa/gh-action-pypi-publish@release/v1\n        with:\n          user: __token__\n          password: ${{ secrets.pypi_secret }}\n"
  },
  {
    "path": ".github/workflows/tests.yml",
    "content": "name: Tests\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n    branches:\n      - main\n\n# Cancels all previous workflow runs for pull requests that have not completed.\nconcurrency:\n  # The concurrency group contains the workflow name and the branch name for pull requests\n  # or the commit hash for any other events.\n  group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}\n  cancel-in-progress: true\n\njobs:\n  changes:\n    name: \"Check for changes\"\n    runs-on: ubuntu-latest\n    outputs:\n      changes: ${{ steps.changes.outputs.src }}\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n      - uses: dorny/paths-filter@v3\n        id: changes\n        with:\n          filters: |\n            python: &python\n            - 'kanren/**/*.py'\n            - 'tests/**/*.py'\n            - '*.py'\n            src:\n            - *python\n            - '.github/**/*.yml'\n            - 'setup.cfg'\n            - 'requirements.txt'\n            - '.coveragerc'\n            - '.pre-commit-config.yaml'\n\n  style:\n    name: Check code style\n    needs: changes\n    runs-on: ubuntu-latest\n    if: ${{ needs.changes.outputs.changes == 'true' }}\n    steps:\n    - uses: actions/checkout@v4\n    - uses: actions/setup-python@v5\n      with:\n        python-version: \"3.10\"\n    - uses: pre-commit/action@v3.0.1\n\n  test:\n    needs:\n      - changes\n      - style\n    runs-on: ubuntu-latest\n    if: ${{ needs.changes.outputs.changes == 'true' && needs.style.result == 'success' }}\n    strategy:\n      matrix:\n        python-version:\n          - \"3.9\"\n          - \"3.10\"\n          - \"3.11\"\n          - \"3.12\"\n          - \"pypy3.9\"\n    steps:\n    - uses: actions/checkout@v4\n    - uses: actions/setup-python@v5\n      with:\n        python-version: ${{ matrix.python-version }}\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Test with pytest\n      run: |\n        pytest -v tests/ --cov=kanren --cov-report=xml:./coverage.xml\n    - name: Coveralls\n      uses: AndreMiras/coveralls-python-action@develop\n      with:\n        parallel: true\n        flag-name: run-${{ matrix.python-version }}\n\n  all-checks:\n    if: ${{ always() }}\n    runs-on: ubuntu-latest\n    name: \"All tests\"\n    needs: [changes, style, test]\n    steps:\n      - name: Check build matrix status\n        if: ${{ needs.changes.outputs.changes == 'true' && (needs.style.result != 'success' || needs.test.result != 'success') }}\n        run: exit 1\n\n  upload-coverage:\n    name: \"Upload coverage\"\n    needs: [changes, all-checks]\n    if: ${{ needs.changes.outputs.changes == 'true' && needs.all-checks.result == 'success' }}\n    runs-on: ubuntu-latest\n    steps:\n    - name: Coveralls Finished\n      uses: AndreMiras/coveralls-python-action@develop\n      with:\n        parallel-finished: true\n"
  },
  {
    "path": ".gitignore",
    "content": "# Created by https://www.gitignore.io/api/vim,emacs,python\n# Edit at https://www.gitignore.io/?templates=vim,emacs,python\n\n### Emacs ###\n# -*- mode: gitignore; -*-\n*~\n\\#*\\#\n/.emacs.desktop\n/.emacs.desktop.lock\n*.elc\nauto-save-list\ntramp\n.\\#*\n\n# Org-mode\n.org-id-locations\n*_archive\n\n# flymake-mode\n*_flymake.*\n\n# eshell files\n/eshell/history\n/eshell/lastdir\n\n# elpa packages\n/elpa/\n\n# reftex files\n*.rel\n\n# AUCTeX auto folder\n/auto/\n\n# cask packages\n.cask/\ndist/\n\n# Flycheck\nflycheck_*.el\n\n# server auth directory\n/server/\n\n# projectiles files\n.projectile\n\n# directory configuration\n.dir-locals.el\n\n# network security\n/network-security.data\n\n\n### Python ###\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\ntesting-report.html\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n### Vim ###\n# Swap\n[._]*.s[a-v][a-z]\n[._]*.sw[a-p]\n[._]s[a-rt-v][a-z]\n[._]ss[a-gi-z]\n[._]sw[a-p]\n\n# Session\nSession.vim\nSessionx.vim\n\n# Temporary\n.netrwhist\n# Auto-generated tag files\ntags\n# Persistent undo\n[._]*.un~\n\n# End of https://www.gitignore.io/api/vim,emacs,python\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "exclude: |\n    (?x)^(\n        versioneer\\.py|\n        kanren/_version\\.py|\n        doc/.*|\n        bin/.*\n    )$\nrepos:\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v4.4.0\n    hooks:\n      - id: debug-statements\n        exclude: |\n          (?x)^(\n              kanren/core\\.py|\n          )$\n      - id: check-merge-conflict\n  - repo: https://github.com/psf/black\n    rev: 22.12.0\n    hooks:\n      - id: black\n        language_version: python3\n  - repo: https://github.com/pycqa/flake8\n    rev: 7.0.0\n    hooks:\n      - id: flake8\n        args: ['--ignore=E721,E712,E501']\n  - repo: https://github.com/pycqa/isort\n    rev: 5.13.2\n    hooks:\n      - id: isort\n  - repo: https://github.com/PyCQA/autoflake\n    rev: v2.3.0\n    hooks:\n      - id: autoflake\n        exclude: |\n          (?x)^(\n              .*/?__init__\\.py|\n          )$\n        args: ['--in-place', '--remove-all-unused-imports', '--remove-unused-variables']\n  - repo: https://github.com/pre-commit/mirrors-mypy\n    rev: v0.991\n    hooks:\n      - id: mypy\n        additional_dependencies:\n        - numpy>=1.20\n        - types-filelock\n        - types-setuptools\n"
  },
  {
    "path": ".pylintrc",
    "content": "[MASTER]\n# Use multiple processes to speed up Pylint.\njobs=0\n\n# Allow loading of arbitrary C extensions. Extensions are imported into the\n# active Python interpreter and may run arbitrary code.\nunsafe-load-any-extension=no\n\n# Allow optimization of some AST trees. This will activate a peephole AST\n# optimizer, which will apply various small optimizations. For instance, it can\n# be used to obtain the result of joining multiple strings with the addition\n# operator. Joining a lot of strings can lead to a maximum recursion error in\n# Pylint and this flag can prevent that. It has one side effect, the resulting\n# AST will be different than the one from reality.\noptimize-ast=no\n\n[MESSAGES CONTROL]\n\n# Only show warnings with the listed confidence levels. Leave empty to show\n# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED\nconfidence=\n\n# Disable the message, report, category or checker with the given id(s). You\n# can either give multiple identifiers separated by comma (,) or put this\n# option multiple times (only on the command line, not in the configuration\n# file where it should appear only once).You can also use \"--disable=all\" to\n# disable everything first and then reenable specific checks. For example, if\n# you want to run only the similarities checker, you can use \"--disable=all\n# --enable=similarities\". If you want to run only the classes checker, but have\n# no Warning level messages displayed, use\"--disable=all --enable=classes\n# --disable=W\"\ndisable=all\n\n# Enable the message, report, category or checker with the given id(s). You can\n# either give multiple identifier separated by comma (,) or put this option\n# multiple time. See also the \"--disable\" option for examples.\nenable=import-error,\n       import-self,\n       reimported,\n       wildcard-import,\n       misplaced-future,\n       relative-import,\n       deprecated-module,\n       unpacking-non-sequence,\n       invalid-all-object,\n       undefined-all-variable,\n       used-before-assignment,\n       cell-var-from-loop,\n       global-variable-undefined,\n       dangerous-default-value,\n       # redefined-builtin,\n       redefine-in-handler,\n       unused-import,\n       unused-wildcard-import,\n       global-variable-not-assigned,\n       undefined-loop-variable,\n       global-at-module-level,\n       bad-open-mode,\n       redundant-unittest-assert,\n       boolean-datetime,\n       # unused-variable\n\n\n[REPORTS]\n\n# Set the output format. Available formats are text, parseable, colorized, msvs\n# (visual studio) and html. You can also give a reporter class, eg\n# mypackage.mymodule.MyReporterClass.\noutput-format=parseable\n\n# Put messages in a separate file for each module / package specified on the\n# command line instead of printing them on stdout. Reports (if any) will be\n# written in a file name \"pylint_global.[txt|html]\".\nfiles-output=no\n\n# Tells whether to display a full report or only the messages\nreports=no\n\n# Python expression which should return a note less than 10 (10 is the highest\n# note). You have access to the variables errors warning, statement which\n# respectively contain the number of errors / warnings messages and the total\n# number of statements analyzed. This is used by the global evaluation report\n# (RP0004).\nevaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)\n\n[BASIC]\n\n# List of builtins function names that should not be used, separated by a comma\nbad-functions=map,filter,input\n\n# Good variable names which should always be accepted, separated by a comma\ngood-names=i,j,k,ex,Run,_\n\n# Bad variable names which should always be refused, separated by a comma\nbad-names=foo,bar,baz,toto,tutu,tata\n\n# Colon-delimited sets of names that determine each other's naming style when\n# the name regexes allow several styles.\nname-group=\n\n# Include a hint for the correct naming format with invalid-name\ninclude-naming-hint=yes\n\n# Regular expression matching correct method names\nmethod-rgx=[a-z_][a-z0-9_]{2,30}$\n\n# Naming hint for method names\nmethod-name-hint=[a-z_][a-z0-9_]{2,30}$\n\n# Regular expression matching correct function names\nfunction-rgx=[a-z_][a-z0-9_]{2,30}$\n\n# Naming hint for function names\nfunction-name-hint=[a-z_][a-z0-9_]{2,30}$\n\n# Regular expression matching correct module names\nmodule-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$\n\n# Naming hint for module names\nmodule-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$\n\n# Regular expression matching correct attribute names\nattr-rgx=[a-z_][a-z0-9_]{2,30}$\n\n# Naming hint for attribute names\nattr-name-hint=[a-z_][a-z0-9_]{2,30}$\n\n# Regular expression matching correct class attribute names\nclass-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$\n\n# Naming hint for class attribute names\nclass-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$\n\n# Regular expression matching correct constant names\nconst-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$\n\n# Naming hint for constant names\nconst-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$\n\n# Regular expression matching correct class names\nclass-rgx=[A-Z_][a-zA-Z0-9]+$\n\n# Naming hint for class names\nclass-name-hint=[A-Z_][a-zA-Z0-9]+$\n\n# Regular expression matching correct argument names\nargument-rgx=[a-z_][a-z0-9_]{2,30}$\n\n# Naming hint for argument names\nargument-name-hint=[a-z_][a-z0-9_]{2,30}$\n\n# Regular expression matching correct inline iteration names\ninlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$\n\n# Naming hint for inline iteration names\ninlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$\n\n# Regular expression matching correct variable names\nvariable-rgx=[a-z_][a-z0-9_]{2,30}$\n\n# Naming hint for variable names\nvariable-name-hint=[a-z_][a-z0-9_]{2,30}$\n\n# Regular expression which should only match function or class names that do\n# not require a docstring.\nno-docstring-rgx=^_\n\n# Minimum line length for functions/classes that require docstrings, shorter\n# ones are exempt.\ndocstring-min-length=-1\n\n\n[ELIF]\n\n# Maximum number of nested blocks for function / method body\nmax-nested-blocks=5\n\n\n[FORMAT]\n\n# Maximum number of characters on a single line.\nmax-line-length=100\n\n# Regexp for a line that is allowed to be longer than the limit.\nignore-long-lines=^\\s*(# )?<?https?://\\S+>?$\n\n# Allow the body of an if to be on the same line as the test if there is no\n# else.\nsingle-line-if-stmt=no\n\n# List of optional constructs for which whitespace checking is disabled. `dict-\n# separator` is used to allow tabulation in dicts, etc.: {1  : 1,\\n222: 2}.\n# `trailing-comma` allows a space between comma and closing bracket: (a, ).\n# `empty-line` allows space-only lines.\nno-space-check=trailing-comma,dict-separator\n\n# Maximum number of lines in a module\nmax-module-lines=1000\n\n# String used as indentation unit. This is usually \"    \" (4 spaces) or \"\\t\" (1\n# tab).\nindent-string='    '\n\n# Number of spaces of indent required inside a hanging  or continued line.\nindent-after-paren=4\n\n# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.\nexpected-line-ending-format=\n\n\n[LOGGING]\n\n# Logging modules to check that the string format arguments are in logging\n# function parameter format\nlogging-modules=logging\n\n\n[MISCELLANEOUS]\n\n# List of note tags to take in consideration, separated by a comma.\nnotes=FIXME,XXX,TODO\n\n\n[SIMILARITIES]\n\n# Minimum lines number of a similarity.\nmin-similarity-lines=4\n\n# Ignore comments when computing similarities.\nignore-comments=yes\n\n# Ignore docstrings when computing similarities.\nignore-docstrings=yes\n\n# Ignore imports when computing similarities.\nignore-imports=no\n\n\n[SPELLING]\n\n# Spelling dictionary name. Available dictionaries: none. To make it working\n# install python-enchant package.\nspelling-dict=\n\n# List of comma separated words that should not be checked.\nspelling-ignore-words=\n\n# A path to a file that contains private dictionary; one word per line.\nspelling-private-dict-file=\n\n# Tells whether to store unknown words to indicated private dictionary in\n# --spelling-private-dict-file option instead of raising a message.\nspelling-store-unknown-words=no\n\n\n[TYPECHECK]\n\n# Tells whether missing members accessed in mixin class should be ignored. A\n# mixin class is detected if its name ends with \"mixin\" (case insensitive).\nignore-mixin-members=yes\n\n# List of module names for which member attributes should not be checked\n# (useful for modules/projects where namespaces are manipulated during runtime\n# and thus existing member attributes cannot be deduced by static analysis. It\n# supports qualified module names, as well as Unix pattern matching.\nignored-modules=tensorflow.core.framework,tensorflow.python.framework,tensorflow.python.ops.gen_linalg_ops\n\n# List of classes names for which member attributes should not be checked\n# (useful for classes with attributes dynamically set). This supports can work\n# with qualified names.\nignored-classes=\n\n# List of members which are set dynamically and missed by pylint inference\n# system, and so shouldn't trigger E1101 when accessed. Python regular\n# expressions are accepted.\ngenerated-members=\n\n\n[VARIABLES]\n\n# Tells whether we should check for unused import in __init__ files.\ninit-import=no\n\n# A regular expression matching the name of dummy variables (i.e. expectedly\n# not used).\ndummy-variables-rgx=_$|dummy\n\n# List of additional names supposed to be defined in builtins. Remember that\n# you should avoid to define new builtins when possible.\nadditional-builtins=\n\n# List of strings which can identify a callback function by name. A callback\n# name must start or end with one of those strings.\ncallbacks=cb_,_cb\n\n\n[CLASSES]\n\n# List of method names used to declare (i.e. assign) instance attributes.\ndefining-attr-methods=__init__,__new__,setUp\n\n# List of valid names for the first argument in a class method.\nvalid-classmethod-first-arg=cls\n\n# List of valid names for the first argument in a metaclass class method.\nvalid-metaclass-classmethod-first-arg=mcs\n\n# List of member names, which should be excluded from the protected access\n# warning.\nexclude-protected=_asdict,_fields,_replace,_source,_make\n\n\n[DESIGN]\n\n# Maximum number of arguments for function / method\nmax-args=5\n\n# Argument names that match this expression will be ignored. Default to name\n# with leading underscore\nignored-argument-names=_.*\n\n# Maximum number of locals for function / method body\nmax-locals=15\n\n# Maximum number of return / yield for function / method body\nmax-returns=6\n\n# Maximum number of branch for function / method body\nmax-branches=12\n\n# Maximum number of statements in function / method body\nmax-statements=50\n\n# Maximum number of parents for a class (see R0901).\nmax-parents=7\n\n# Maximum number of attributes for a class (see R0902).\nmax-attributes=7\n\n# Minimum number of public methods for a class (see R0903).\nmin-public-methods=2\n\n# Maximum number of public methods for a class (see R0904).\nmax-public-methods=20\n\n# Maximum number of boolean expressions in a if statement\nmax-bool-expr=5\n\n\n[IMPORTS]\n\n# Deprecated modules which should not be used, separated by a comma\ndeprecated-modules=optparse\n\n# Create a graph of every (i.e. internal and external) dependencies in the\n# given file (report RP0402 must not be disabled)\nimport-graph=\n\n# Create a graph of external dependencies in the given file (report RP0402 must\n# not be disabled)\next-import-graph=\n\n# Create a graph of internal dependencies in the given file (report RP0402 must\n# not be disabled)\nint-import-graph=\n\n\n[EXCEPTIONS]\n\n# Exceptions that will emit a warning when being caught. Defaults to\n# \"Exception\"\novergeneral-exceptions=Exception\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "Copyright (c) 2019 Brandon T. Willard\nCopyright (c) 2012 Matthew Rocklin\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n  a. Redistributions of source code must retain the above copyright notice,\n     this list of conditions and the following disclaimer.\n  b. Redistributions in binary form must reproduce the above copyright\n     notice, this list of conditions and the following disclaimer in the\n     documentation and/or other materials provided with the distribution.\n  c. Neither the name of kanren nor the names of its contributors\n     may be used to endorse or promote products derived from this software\n     without specific prior written permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\nOUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "include LICENSE.txt\ninclude pyproject.toml\ninclude README.md\ninclude setup.cfg\n\ngraft kanren\n\nprune .github\nprune doc\nprune examples\nprune tests\nprune *egg-info\nprune *_cache\n\nexclude .gitattributes\nexclude .gitignore\nexclude .pre-commit-config.yaml\nexclude .pylintrc\nexclude Makefile\nexclude pytest.ini\nexclude release-notes\nexclude requirements.txt\nexclude tox.ini\nexclude *venv*\n\nglobal-exclude *.pyc\nglobal-exclude .DS_Store\nglobal-exclude __pycache__\n"
  },
  {
    "path": "Makefile",
    "content": ".PHONY: help venv conda docker docstyle format style black test lint check coverage pypi\n.DEFAULT_GOAL = help\n\nPYTHON = python3\nPIP = pip\nCONDA = conda\nSHELL = bash\n\nhelp:\n\t@printf \"Usage:\\n\"\n\t@grep -E '^[a-zA-Z_-]+:.*?# .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?# \"}; {printf \"\\033[1;34mmake %-10s\\033[0m%s\\n\", $$1, $$2}'\n\nconda:  # Set up a conda environment for development.\n\t@printf \"Creating conda environment...\\n\"\n\t${CONDA} create --yes --name kanren-env python=3.6\n\t( \\\n\t${CONDA} activate kanren-env; \\\n\t${PIP} install -U pip; \\\n\t${PIP} install -r requirements.txt; \\\n\t${PIP} install -r requirements-dev.txt; \\\n\t${CONDA} deactivate; \\\n\t)\n\t@printf \"\\n\\nConda environment created! \\033[1;34mRun \\`conda activate kanren-env\\` to activate it.\\033[0m\\n\\n\\n\"\n\nvenv:  # Set up a Python virtual environment for development.\n\t@printf \"Creating Python virtual environment...\\n\"\n\trm -rf kanren-venv\n\t${PYTHON} -m venv kanren-venv\n\t( \\\n\tsource kanren-venv/bin/activate; \\\n\t${PIP} install -U pip; \\\n\t${PIP} install -r requirements.txt; \\\n\tdeactivate; \\\n\t)\n\t@printf \"\\n\\nVirtual environment created! \\033[1;34mRun \\`source kanren-venv/bin/activate\\` to activate it.\\033[0m\\n\\n\\n\"\n\ndocker:  # Set up a Docker image for development.\n\t@printf \"Creating Docker image...\\n\"\n\t${SHELL} ./scripts/container.sh --build\n\ndocstyle:\n\t@printf \"Checking documentation with pydocstyle...\\n\"\n\tpydocstyle kanren/\n\t@printf \"\\033[1;34mPydocstyle passes!\\033[0m\\n\\n\"\n\nformat:\n\t@printf \"Checking code style with black...\\n\"\n\tblack --check kanren/ tests/\n\t@printf \"\\033[1;34mBlack passes!\\033[0m\\n\\n\"\n\nstyle:\n\t@printf \"Checking code style with pylint...\\n\"\n\tpylint kanren/ tests/\n\t@printf \"\\033[1;34mPylint passes!\\033[0m\\n\\n\"\n\nblack:  # Format code in-place using black.\n\tblack kanren/ tests/\n\ntest:  # Test code using pytest.\n\tpytest -v tests/ kanren/ --cov=kanren/ --cov-report=xml --html=testing-report.html --self-contained-html\n\ncoverage: test\n\tdiff-cover coverage.xml --compare-branch=main --fail-under=100\n\nbuild-distribution:\n\t${PYTHON} -m venv .venv\n\t./.venv/bin/pip install --upgrade pip\n\t./.venv/bin/pip install build\n\t./.venv/bin/python -m build .\n\t@echo \"Built packages are in dist/\"\n\npypi: build-distribution\n\t${PYTHON} -m venv .venv\n\t./.venv/bin/pip install --upgrade pip\n\t./.venv/bin/pip install twine\n\ttwine upload --skip-existing dist/*;\n\nlint: docstyle format style  # Lint code using pydocstyle, black and pylint.\n\ncheck: lint test coverage  # Both lint and test code. Runs `make lint` followed by `make test`.\n"
  },
  {
    "path": "README.md",
    "content": "# `kanren`\n\n[![Build Status](https://travis-ci.org/pythological/kanren.svg?branch=main)](https://travis-ci.org/pythological/kanren) [![Coverage Status](https://coveralls.io/repos/github/pythological/kanren/badge.svg?branch=main)](https://coveralls.io/github/pythological/kanren?branch=main) [![PyPI](https://img.shields.io/pypi/v/miniKanren)](https://pypi.org/project/miniKanren/) [![Join the chat at https://gitter.im/pythological/kanren](https://badges.gitter.im/pythological/kanren.svg)](https://gitter.im/pythological/kanren?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n\nLogic/relational programming in Python with [miniKanren](http://minikanren.org/).\n\n## Installation\n\nUsing `pip`:\n```bash\npip install miniKanren\n```\n\nUsing `conda`:\n```bash\nconda install -c conda-forge miniKanren\n```\n\n## Development\n\nFirst obtain the project source:\n```bash\ngit clone git@github.com:pythological/kanren.git\ncd kanren\n```\n\nInstall the development dependencies:\n\n```bash\n$ pip install -r requirements.txt\n```\n\nSet up `pre-commit` hooks:\n\n```bash\n$ pre-commit install --install-hooks\n```\n\nTests can be run with the provided `Makefile`:\n```bash\nmake check\n```\n\n## Motivation\n\nLogic programming is a general programming paradigm.  This implementation however came about specifically to serve as an algorithmic core for Computer Algebra Systems in Python and for the automated generation and optimization of numeric software.  Domain specific languages, code generation, and compilers have recently been a hot topic in the Scientific Python community.  `kanren` aims to be a low-level core for these projects.\n\nThese points&mdash;along with `kanren` examples&mdash;are covered in the paper [\"miniKanren as a Tool for Symbolic Computation in Python\"](https://arxiv.org/abs/2005.11644).\n\n## Examples\n\n`kanren` enables one to express sophisticated relations&mdash;in the form of *goals*&mdash;and generate values that satisfy the relations.  The following code is the \"Hello, world!\" of logic programming; it asks for values of the *logic variable* `x` such that `x == 5`:\n\n```python\n>>> from kanren import run, eq, membero, var, lall\n>>> x = var()\n>>> run(1, x, eq(x, 5))\n(5,)\n```\n\nMultiple logic variables and goals can be used simultaneously.  The following code asks for one list containing the values of `x` and `z` such that `x == z` **and** `z == 3`:\n\n```python\n>>> z = var()\n>>> run(1, [x, z], eq(x, z),\n                   eq(z, 3))\n([3, 3],)\n```\n\n`kanren` uses [unification](http://en.wikipedia.org/wiki/Unification_%28computer_science%29) to match forms within expression trees.  The following code asks for values of `x` such that `(1, 2) == (1, x)`:\n\n```python\n>>> run(1, x, eq((1, 2), (1, x)))\n(2,)\n```\n\nThe above examples use `eq`: a *goal constructor* that creates a goal for unification between two objects.  Other goal constructors, such as `membero(item, coll)`, express more sophisticated relations and are often constructed from simpler ones like `eq`.  More specifically, `membero` states that `item` is a member of the collection `coll`.\n\nThe following example uses `membero` to ask for *all* values of `x`, such that `x` is a member of `(1, 2, 3)` **and** `x` is a member of `(2, 3, 4)`.\n\n```python\n>>> run(0, x, membero(x, (1, 2, 3)),  # x is a member of (1, 2, 3)\n              membero(x, (2, 3, 4)))  # x is a member of (2, 3, 4)\n(2, 3)\n```\n\nThe examples above made implicit use of the goal constructors `lall` and `lany`, which represent goal *conjunction* and *disjunction*, respectively.  Many useful relations can be expressed with `lall`, `lany`, and `eq` alone, but in `kanren` it's also easy to leverage the host language and explicitly create any relation expressible in Python.\n\n### Representing Knowledge\n\n`kanren` stores data as facts that state relationships between terms.  The following code creates a parent relationship and uses it to state facts about who is a parent of whom within the Simpsons family:\n\n```python\n>>> from kanren import Relation, facts\n>>> parent = Relation()\n>>> facts(parent, (\"Homer\", \"Bart\"),\n...               (\"Homer\", \"Lisa\"),\n...               (\"Abe\",  \"Homer\"))\n\n>>> run(1, x, parent(x, \"Bart\"))\n('Homer',)\n\n>>> run(2, x, parent(\"Homer\", x))\n('Lisa', 'Bart')\n```\n\nWe can use intermediate variables for more complex queries.  For instance, who is Bart's grandfather?\n\n```python\n>>> grandparent_lv, parent_lv = var(), var()\n>>> run(1, grandparent_lv, parent(grandparent_lv, parent_lv),\n                           parent(parent_lv, 'Bart'))\n('Abe',)\n```\n\nWe can express the grandfather relationship as a distinct relation by creating a goal constructor:\n```python\n>>> def grandparent(x, z):\n...     y = var()\n...     return lall(parent(x, y), parent(y, z))\n\n>>> run(1, x, grandparent(x, 'Bart'))\n('Abe',)\n```\n\n## Constraints\n\n`kanren` provides a fully functional constraint system that allows one to restrict unification and object types:\n\n```python\n>>> from kanren.constraints import neq, isinstanceo\n\n>>> run(0, x,\n...     neq(x, 1),  # Not \"equal\" to 1\n...     neq(x, 3),  # Not \"equal\" to 3\n...     membero(x, (1, 2, 3)))\n(2,)\n\n>>> from numbers import Integral\n>>> run(0, x,\n...     isinstanceo(x, Integral),  # `x` must be of type `Integral`\n...     membero(x, (1.1, 2, 3.2, 4)))\n(2, 4)\n```\n\n## Graph Relations\n\n`kanren` comes with support for relational graph operations suitable for basic symbolic algebra operations.  See the examples in [`doc/graphs.md`](doc/graphs.md).\n\n## Extending `kanren`\n\n`kanren` uses the [`logical-unification` library](https://github.com/pythological/unification) to support pattern matching on user defined types.  Essentially, types that can be unified can be used with most `kanren` goals.  See the [`logical-unification` project's examples](https://github.com/pythological/unification#examples) for demonstrations of how arbitrary types can be made unifiable.\n\n## About\n\nThis project is a fork of [`logpy`](https://github.com/logpy/logpy).\n\n## References\n\n* [Logic Programming on wikipedia](http://en.wikipedia.org/wiki/Logic_programming)\n* [miniKanren](http://minikanren.org/), a Scheme library for relational programming on which this library is based.  More information can be found in the\n[thesis of William\nByrd](https://scholarworks.iu.edu/dspace/bitstream/handle/2022/8777/Byrd_indiana_0093A_10344.pdf).\n"
  },
  {
    "path": "doc/basic.md",
    "content": "# Basics of `miniKanren`\n\nThe design of `miniKanren` is simple.  It orchestrates only a few basic operations and yields a lot!\n\n## Terms\n\nTerms can be\n\n- any Python object (e.g. `1`, `[1, 2]`, `object()`, etc.),\n- logical variables constructed with `var`&mdash;denoted here by a tilde prefix (e.g. `~x`),\n- or combinations of the two (e.g. `(1, ~x, 'cat')`)\n\nIn short, they are trees in which leaves may be either constants or variables.  Constants may be of any Python type.\n\n## Unification\n\nWe *unify* two similar terms like `(1, 2)` and `(1, ~x)` to form a *substitution* `{~x: 2}`.  We say that `(1, 2)` and `(1, ~x)` unify under the substitution `{~x: 2}`.  Variables may assume the value of any term.\n\n`unify` is a function, provided by the [`logical-unification`](https://github.com/pythological/unification) library, that takes two terms, `u` and `v`, and returns a substitution `s`.\n\nExamples that unify\n\n|       u           |       v           |        s          |\n|:-----------------:|:-----------------:|:-----------------:|\n| 123               | 123               | {}                |\n| 'cat'             | 'cat'             | {}                |\n| (1, 2)            | (1, 2)            | {}                |\n| ~x                | 1                 | {~x: 1}           |\n| 1                 | ~x                | {~x: 1}           |\n| (1, ~x)           | (1, 2)            | {~x: 2}           |\n| (1, 1)            | (~x, ~x)          | {~x: 1}           |\n| (1, 2, ~x)        | (~y, 2, 3)        | {~x: 3, ~y: 1}    |\n\nExamples that don't unify\n\n|       u           |       v           |\n|:-----------------:|:-----------------:|\n| 123               | 'cat'             |\n| (1, 2)            | 12                |\n| (1, ~x)           | (2, 2)            |\n| (1, 2)            | (~x, ~x)          |\n\nActually we lied, `unify` also takes a substitution as input.  This allows us to keep some history around.  
For example:\n\n```python\n>>> unify((1, 2), (1, x), {})  # normal case\n{~x: 2}\n>>> unify((1, 2), (1, x), {x: 2})  # x is already two. This is consistent\n{~x: 2}\n>>> unify((1, 2), (1, x), {x: 3})  # x is already three.  This conflicts\nFalse\n```\n\n## Reification\n\nReification is the opposite of unification.  `reify` transforms a term with logic variables like `(1, ~x)` and a substitution like `{~x: 2}` into a term without logic variables like `(1, 2)`.\n```python\n>>> reify((1, x), {x: 2})\n(1, 2)\n```\n\n## Goals and Goal Constructors\n\nA *goal* is a function from one substitution to a stream of substitutions.\n\n```\ngoal :: substitution -> [substitutions]\n```\n\nWe make goals with *goal constructors*.  Goal constructors are the normal building block of a logical program.  Let's look at the goal constructor `membero` which states that the first input must be a member of the second input (a collection).\n\n```\ngoal = membero(x, (1, 2, 3))\n```\n\nWe can feed this goal a substitution and it will give us a stream of substitutions.  Here we'll feed it the substitution with no information and it will tell us that either `x` can be `1` or `x` can be `2` or `x` can be `3`\n\n```python\n>>> for s in goal({}):\n...     print(s)\n{~x: 1}\n{~x: 2}\n{~x: 3}\n```\nWhat if we already know that `x` is `2`?\n```python\n>>> for s in goal({x: 2}):\n...     print(s)\n{~x: 2}\n```\n\nRemember *goals* are functions from one substitution to a stream of substitutions.  Users usually make goals with *goal constructors* like `eq`, or `membero`.\n\n### Stream Functions\n\nAfter this point `miniKanren` is just a library to manage streams of substitutions.\n\nFor example if we know both that `membero(x, (1, 2, 3))` and `membero(x, (2, 3, 4))` then we could do something like the following:\n\n```python\n>>> g1 = membero(x, (1, 2, 3))\n>>> g2 = membero(x, (2, 3, 4))\n>>> for s in g1({}):\n...     for ss in g2(s):\n...         
print(ss)\n{~x: 2}\n{~x: 3}\n```\nLogic programs can have many goals in complex hierarchies.  Writing explicit for loops would quickly become tedious.  Instead `miniKanren` provides functions that perform logic-like operations on goal streams.\n\n```\ncombinator :: [goals] -> goal\n```\n\nTwo important stream functions are logical all `lall` and logical any `lany`.\n```python\n>>> g = lall(g1, g2)\n>>> for s in g({}):\n...     print(s)\n{~x: 2}\n{~x: 3}\n\n>>> g = lany(g1, g2)\n>>> for s in g({}):\n...     print(s)\n{~x: 1}\n{~x: 2}\n{~x: 3}\n{~x: 4}\n```\n\n### Laziness\n\nGoals produce a stream of substitutions.  This stream is computed lazily, returning values only as they are needed.  `miniKanren` depends on standard Python generators to maintain the necessary state and control flow.\n\n```python\n>>> stream = g({})\n>>> stream\n<generator object unique at 0x2e13690>\n>>> next(stream)\n{~x: 1}\n```\n\n## User Interface\n\nTraditionally programs are run with the `run` function\n\n```python\n>>> x = var()\n>>> run(0, x, membero(x, (1, 2, 3)), membero(x, (2, 3, 4)))\n(2, 3)\n```\n`run` has an implicit `lall` for the goals at the end of the call.  It `reifies` results when it returns so that the user never has to touch logic variables or substitutions.\n\n## Conclusion\n\nThese are all the fundamental concepts that exist in `miniKanren`.  To summarize:\n\n- *Term*: a Python object, logic variable, or combination of the two\n- *Substitution Map*: a dictionary mapping logic variables to terms\n- *Unification*: A function that finds logic variable substitutions that make two terms equal\n- *Reification*: A function that substitutes logic variables in a term with values given by a substitution map\n- *Goal*: A generator function that takes a substitution and yields a stream of substitutions\n- *Goal Constructor*: A user-level function that constructs and returns a goal\n"
  },
  {
    "path": "doc/graphs.md",
    "content": "# Relational Graph Manipulation\n\nIn this document, we show how `kanren` can be used to perform symbolic algebra operations *relationally*.\n\n## Setup\n\nFirst, we import the necessary modules and create a helper function for pretty printing the algebraic expressions.\n\n```python\nfrom math import log, exp\nfrom numbers import Real\nfrom functools import partial\nfrom operator import add, mul\n\nfrom unification import var\n\nfrom etuples.core import etuple, ExpressionTuple\n\nfrom kanren import run, eq, conde, lall\nfrom kanren.core import success\nfrom kanren.graph import walko, reduceo\nfrom kanren.constraints import isinstanceo\n\n# Just some nice formatting\ndef etuple_str(self):\n    if len(self) > 0:\n        return f\"{getattr(self[0], '__name__', self[0])}({', '.join(map(str, self[1:]))})\"\n    else:\n        return 'noop'\n\n\nExpressionTuple.__str__ = etuple_str\ndel ExpressionTuple._repr_pretty_\n\n```\n\nNext, we create a simple goal constructor that implements the algebraic relations `x + x == 2 * x` and `log(exp(x)) == x` and\nconstrains the input types to real numbers and expression tuples from the [`etuples`](https://github.com/pythological/etuples) package.\n\n```python\ndef single_math_reduceo(expanded_term, reduced_term):\n    \"\"\"Construct a goal for some simple math reductions.\"\"\"\n    # Create a logic variable to represent our variable term \"x\"\n    x_lv = var()\n    # `conde` is a relational version of Lisp's `cond`/if-else; here, each\n    # \"branch\" pairs the right- and left-hand sides of a replacement rule with\n    # the corresponding inputs.\n    return lall(\n        isinstanceo(x_lv, Real),\n        isinstanceo(x_lv, ExpressionTuple),\n        conde(\n            # add(x, x) == mul(2, x)\n            [eq(expanded_term, etuple(add, x_lv, x_lv)),\n             eq(reduced_term, etuple(mul, 2, x_lv))],\n            # log(exp(x)) == x\n            [eq(expanded_term, etuple(log, etuple(exp, x_lv))),\n            
 eq(reduced_term, x_lv)]),\n    )\n\n```\n\nIn order to obtain \"fully reduced\" results, we need to turn `single_math_reduceo` into a fixed-point-producing relation (i.e. recursive).\n```python\nmath_reduceo = partial(reduceo, single_math_reduceo)\n```\n\nWe also need a relation that walks term graphs specifically (i.e. graphs composed of operator and operand combinations) and necessarily produces its output in the form of expression tuples.\n```python\nterm_walko = partial(walko, rator_goal=eq, null_type=ExpressionTuple)\n```\n\n## Reductions\n\nThe following example is a straight-forward reduction&mdash;i.e. left-to-right applications of the relations in `math_reduceo`&mdash;of the term `add(etuple(add, 3, 3), exp(log(exp(5))))`.  This is the direction in which results are normally computed in symbolic algebra libraries.\n\n```python\n# This is the term we want to reduce\nexpanded_term = etuple(add, etuple(add, 3, 3), etuple(exp, etuple(log, etuple(exp, 5))))\n\n# Create a logic variable to represent the results we want to compute\nreduced_term = var()\n\n# Ask for three results (asking for 0 results would mean all results)\nres = run(3, reduced_term, term_walko(math_reduceo, expanded_term, reduced_term))\n```\n\n```python\n>>> print('\\n'.join((f'{expanded_term} == {r}' for r in res)))\nadd(add(3, 3), exp(log(exp(5)))) == add(mul(2, 3), exp(5))\nadd(add(3, 3), exp(log(exp(5)))) == add(add(3, 3), exp(5))\nadd(add(3, 3), exp(log(exp(5)))) == add(mul(2, 3), exp(log(exp(5))))\n```\n\n## Expansions\n\nIn this example, we're specifying a grounded reduced term (i.e. `mul(2, 5)`) and an unground expanded term (i.e. the logic variable `q_lv`).  We're essentially asking for *graphs that would reduce to `mul(2, 5)`*.  
Naturally, there are infinitely many graphs that reduce to `mul(2, 5)`, so we're only going to ask for ten of them; nevertheless, miniKanren is inherently capable of handling infinitely many results through its use of lazily evaluated goal streams.\n\n```python\nexpanded_term = var()\nreduced_term = etuple(mul, 2, 5)\n\n# Ask for 10 results of `q_lv`\nres = run(10, expanded_term, term_walko(math_reduceo, expanded_term, reduced_term))\n```\n```python\n>>> rjust = max(map(lambda x: len(str(x)), res))\n>>> print('\\n'.join((f'{str(r):>{rjust}} == {reduced_term}' for r in res)))\n                                        add(5, 5) == mul(2, 5)\n                    mul(log(exp(2)), log(exp(5))) == mul(2, 5)\n                              log(exp(add(5, 5))) == mul(2, 5)\n                              mul(2, log(exp(5))) == mul(2, 5)\n                    log(exp(log(exp(add(5, 5))))) == mul(2, 5)\n          mul(log(exp(log(exp(2)))), log(exp(5))) == mul(2, 5)\n          log(exp(log(exp(log(exp(add(5, 5))))))) == mul(2, 5)\n                    mul(2, log(exp(log(exp(5))))) == mul(2, 5)\nlog(exp(log(exp(log(exp(log(exp(add(5, 5))))))))) == mul(2, 5)\nmul(log(exp(log(exp(log(exp(2)))))), log(exp(5))) == mul(2, 5)\n```\n\n## Expansions _and_ Reductions\nNow, we set **both** term graphs to unground logic variables.\n\n```python\nexpanded_term = var()\nreduced_term = var()\n\nres = run(10, [expanded_term, reduced_term],\n          term_walko(math_reduceo, expanded_term, reduced_term))\n```\n\n```python\n>>> rjust = max(map(lambda x: len(str(x[0])), res))\n>>> print('\\n'.join((f'{str(e):>{rjust}} == {str(r)}' for e, r in res)))\n                                        add(~_2291, ~_2291) == mul(2, ~_2291)\n                                                   ~_2288() == ~_2288()\n                              log(exp(add(~_2297, ~_2297))) == mul(2, ~_2297)\n                                ~_2288(add(~_2303, ~_2303)) == ~_2288(mul(2, ~_2303))\n                    
log(exp(log(exp(add(~_2309, ~_2309))))) == mul(2, ~_2309)\n                                             ~_2288(~_2294) == ~_2288(~_2294)\n          log(exp(log(exp(log(exp(add(~_2315, ~_2315))))))) == mul(2, ~_2315)\n                                           ~_2288(~_2300()) == ~_2288(~_2300())\nlog(exp(log(exp(log(exp(log(exp(add(~_2325, ~_2325))))))))) == mul(2, ~_2325)\n                        ~_2288(~_2294, add(~_2331, ~_2331)) == ~_2288(~_2294, mul(2, ~_2331))\n```\n\nThe symbols prefixed by `~` are the string form of logic variables, so a result like `add(~_2291, ~_2291)` essentially means `add(x, x)` for some variable `x`.  In this instance, miniKanren has used our algebraic relations in `math_reduceo` to produce more relations&mdash;even some with variable operators with multiple arities!\n\nWith additional goals, we can narrow-in on very specific types of expressions.  In the following, we state that `expanded_term` must be the [`cons`](https://github.com/pythological/python-cons) of a `log` and logic variable (i.e. anything else).  
In other words, we're stating that the operator of `expanded_term` must be a `log`, or that we want all expressions expanding to a `log`.\n\n```python\nfrom kanren.goals import conso\n\nres = run(10, [expanded_term, reduced_term],\n          conso(log, var(), expanded_term),\n          term_walko(math_reduceo, expanded_term, reduced_term))\n```\n```python\n>>> rjust = max(map(lambda x: len(str(x[0])), res))\n>>> print('\\n'.join((f'{str(e):>{rjust}} == {str(r)}' for e, r in res)))\n                              log(exp(add(~_2344, ~_2344))) == mul(2, ~_2344)\n                                                      log() == log()\n                                    log(exp(~reduced_2285)) == ~reduced_2285\n                                   log(add(~_2354, ~_2354)) == log(mul(2, ~_2354))\n                    log(exp(log(exp(add(~_2360, ~_2360))))) == mul(2, ~_2360)\n                                                log(~_2347) == log(~_2347)\n          log(exp(log(exp(log(exp(add(~_2366, ~_2366))))))) == mul(2, ~_2366)\n                                              log(~_2351()) == log(~_2351())\nlog(exp(log(exp(log(exp(log(exp(add(~_2376, ~_2376))))))))) == mul(2, ~_2376)\n                           log(~_2347, add(~_2382, ~_2382)) == log(~_2347, mul(2, ~_2382))\n```\n\nThe output contains a nullary `log` function, which isn't a valid expression.  
We can restrict this type of output by further stating that the `log` expression's `cdr` term is itself the result of a `cons` and, thus, not an empty sequence.\n\n```python\nexp_term_cdr = var()\n\nres = run(10, [expanded_term, reduced_term],\n          conso(log, exp_term_cdr, expanded_term),\n          conso(var(), var(), exp_term_cdr),\n          term_walko(math_reduceo, expanded_term, reduced_term))\n```\n```python\n>>> rjust = max(map(lambda x: len(str(x[0])), res))\n>>> print('\\n'.join((f'{str(e):>{rjust}} == {str(r)}' for e, r in res)))\n                              log(exp(add(~_2457, ~_2457))) == mul(2, ~_2457)\n                                   log(add(~_2467, ~_2467)) == log(mul(2, ~_2467))\n                                           log(exp(~_2446)) == ~_2446\n                                                log(~_2460) == log(~_2460)\n                    log(exp(log(exp(add(~_2477, ~_2477))))) == mul(2, ~_2477)\n                                              log(~_2464()) == log(~_2464())\n          log(exp(log(exp(log(exp(add(~_2487, ~_2487))))))) == mul(2, ~_2487)\n                           log(~_2460, add(~_2493, ~_2493)) == log(~_2460, mul(2, ~_2493))\nlog(exp(log(exp(log(exp(log(exp(add(~_2499, ~_2499))))))))) == mul(2, ~_2499)\n                         log(log(exp(add(~_2501, ~_2501)))) == log(mul(2, ~_2501))\n```\n"
  },
  {
    "path": "examples/__init__.py",
    "content": ""
  },
  {
    "path": "examples/account.py",
    "content": "class Account(object):\n    def __init__(self, first, last, id, balance):\n        self.first = first\n        self.last = last\n        self.id = id\n        self.balance = balance\n\n    def info(self):\n        return (self.first, self.last, self.id, self.balance)\n\n    def __eq__(self, other):\n        if isinstance(other, type(self)):\n            return self.info() == other.info()\n        return False\n\n    def __hash__(self):\n        return hash((type(self), self.info()))\n\n    def __str__(self):\n        return \"Account: %s %s, id %d, balance %d\" % self.info()\n\n    __repr__ = __str__\n"
  },
  {
    "path": "examples/commutative.py",
    "content": "from kanren import fact, run, var\nfrom kanren.assoccomm import associative, commutative\nfrom kanren.assoccomm import eq_assoccomm as eq\n\n\n# Define some dummy Operationss\nadd = \"add\"\nmul = \"mul\"\n\n# Declare that these ops are commutative using the facts system\nfact(commutative, mul)\nfact(commutative, add)\nfact(associative, mul)\nfact(associative, add)\n\n# Define some logic variables\nx, y = var(), var()\n\n# Two expressions to match\npattern = (mul, (add, 1, x), y)  # (1 + x) * y\nexpr = (mul, 2, (add, 3, 1))  # 2 * (3 + 1)\n\nres = run(0, (x, y), eq(pattern, expr))\nprint(res)\n# prints ((3, 2),) meaning\n#   x matches to 3\n#   y matches to 2\n"
  },
  {
    "path": "examples/corleone.py",
    "content": "\"\"\"\nFamily relationships from The Godfather Translated from the core.logic example\nfound in \"The Magical Island of Kanren - core.logic Intro Part 1\"\nhttp://objectcommando.com/blog/2011/11/04/the-magical-island-of-kanren-core-logic-intro-part-1/\n\"\"\"\nimport toolz\n\nfrom kanren import Relation, conde, facts, run, var\n\n\nfather = Relation()\nmother = Relation()\n\nfacts(\n    father,\n    (\"Vito\", \"Michael\"),\n    (\"Vito\", \"Sonny\"),\n    (\"Vito\", \"Fredo\"),\n    (\"Michael\", \"Anthony\"),\n    (\"Michael\", \"Mary\"),\n    (\"Sonny\", \"Vicent\"),\n    (\"Sonny\", \"Francesca\"),\n    (\"Sonny\", \"Kathryn\"),\n    (\"Sonny\", \"Frank\"),\n    (\"Sonny\", \"Santino\"),\n)\n\nfacts(\n    mother,\n    (\"Carmela\", \"Michael\"),\n    (\"Carmela\", \"Sonny\"),\n    (\"Carmela\", \"Fredo\"),\n    (\"Kay\", \"Mary\"),\n    (\"Kay\", \"Anthony\"),\n    (\"Sandra\", \"Francesca\"),\n    (\"Sandra\", \"Kathryn\"),\n    (\"Sandra\", \"Frank\"),\n    (\"Sandra\", \"Santino\"),\n)\n\nq = var()\n\nprint((run(0, q, father(\"Vito\", q))))  # Vito is the father of who?\n# ('Sonny', 'Michael', 'Fredo')\n\n\nprint((run(0, q, father(q, \"Michael\"))))  # Who is the father of Michael?\n# ('Vito',)\n\n\ndef parent(p, child):\n    return conde([father(p, child)], [mother(p, child)])\n\n\nprint((run(0, q, parent(q, \"Michael\"))))  # Who is a parent of Michael?\n# ('Vito', 'Carmela')\n\n\ndef grandparent(gparent, child):\n    p = var()\n    return conde((parent(gparent, p), parent(p, child)))\n\n\nprint(run(0, q, grandparent(q, \"Anthony\")))  # Who is a grandparent of Anthony?\n# ('Vito', 'Carmela')\n\n\nprint(run(0, q, grandparent(\"Vito\", q)))  # Vito is a grandparent of whom?\n# ('Vicent', 'Anthony', 'Kathryn', 'Mary', 'Frank', 'Santino', 'Francesca')\n\n\ndef sibling(a, b):\n    p = var()\n    return conde((parent(p, a), parent(p, b)))\n\n\n# All spouses\nx, y, z = var(), var(), var()\n\nprint(run(0, (x, y), father(x, z), mother(y, z), 
results_filter=toolz.unique))\n# (('Sonny', 'Sandra'), ('Vito', 'Carmela'), ('Michael', 'Kay'))\n"
  },
  {
    "path": "examples/data/adjacent-states.txt",
    "content": "# Author Gregg Lind\n# License:  Public Domain.    I would love to hear about any projects you use\n# if it for though!\n# http://writeonly.wordpress.com/2009/03/20/adjacency-list-of-states-of-the-united-states-us/\n\nAK\nAL,MS,TN,GA,FL\nAR,MO,TN,MS,LA,TX,OK\nAZ,CA,NV,UT,CO,NM\nCA,OR,NV,AZ\nCO,WY,NE,KS,OK,NM,AZ,UT\nCT,NY,MA,RI\nDC,MD,VA\nDE,MD,PA,NJ\nFL,AL,GA\nGA,FL,AL,TN,NC,SC\nHI\nIA,MN,WI,IL,MO,NE,SD\nID,MT,WY,UT,NV,OR,WA\nIL,IN,KY,MO,IA,WI\nIN,MI,OH,KY,IL\nKS,NE,MO,OK,CO\nKY,IN,OH,WV,VA,TN,MO,IL\nLA,TX,AR,MS\nMA,RI,CT,NY,NH,VT\nMD,VA,WV,PA,DC,DE\nME,NH\nMI,WI,IN,OH\nMN,WI,IA,SD,ND\nMO,IA,IL,KY,TN,AR,OK,KS,NE\nMS,LA,AR,TN,AL\nMT,ND,SD,WY,ID\nNC,VA,TN,GA,SC\nND,MN,SD,MT\nNE,SD,IA,MO,KS,CO,WY\nNH,VT,ME,MA\nNJ,DE,PA,NY\nNM,AZ,UT,CO,OK,TX\nNV,ID,UT,AZ,CA,OR\nNY,NJ,PA,VT,MA,CT\nOH,PA,WV,KY,IN,MI\nOK,KS,MO,AR,TX,NM,CO\nOR,CA,NV,ID,WA\nPA,NY,NJ,DE,MD,WV,OH\nRI,CT,MA\nSC,GA,NC\nSD,ND,MN,IA,NE,WY,MT\nTN,KY,VA,NC,GA,AL,MS,AR,MO\nTX,NM,OK,AR,LA\nUT,ID,WY,CO,NM,AZ,NV\nVA,NC,TN,KY,WV,MD,DC\nVT,NY,NH,MA\nWA,ID,OR\nWI,MI,MN,IA,IL\nWV,OH,PA,MD,VA,KY\nWY,MT,SD,NE,CO,UT,ID\n"
  },
  {
    "path": "examples/data/coastal-states.txt",
    "content": "WA,OR,CA,TX,LA,MI,AL,GA,FL,SC,NC,VI,MD,DW,NJ,NY,CT,RI,MA,MN,NH\n"
  },
  {
    "path": "examples/states.py",
    "content": "\"\"\"\nAn example showing how to use facts and relations to store data and query data\n\nThis example builds a small database of the US states.\n\nThe `adjacency` relation expresses which states border each other.\nThe `coastal` relation expresses which states border the ocean.\n\"\"\"\nfrom kanren import Relation, fact, run, var\n\n\nadjacent = Relation()\ncoastal = Relation()\n\n\ncoastal_states = (\n    \"WA,OR,CA,TX,LA,MS,AL,GA,FL,SC,NC,VA,MD,DE,NJ,NY,CT,RI,MA,ME,NH,AK,HI\".split(\",\")\n)\n\n# ['NY', 'NJ', 'CT', ...]\nfor state in coastal_states:\n    # E.g. 'NY' is coastal\n    fact(coastal, state)\n\n# Lines like 'CA,OR,NV,AZ'\nwith open(\"examples/data/adjacent-states.txt\") as f:\n    adjlist = [line.strip().split(\",\") for line in f if line and line[0].isalpha()]\n\n# ['CA', 'OR', 'NV', 'AZ']\nfor L in adjlist:\n    # 'CA', ['OR', 'NV', 'AZ']\n    head, tail = L[0], L[1:]\n    for state in tail:\n        # E.g. 'CA' is adjacent to 'OR', 'CA' is adjacent to 'NV', etc.\n        fact(adjacent, head, state)\n\nx = var()\ny = var()\n\n# Is California adjacent to New York?\nprint(run(0, x, adjacent(\"CA\", \"NY\")))\n# ()\n\n# All states next to California\nprint(run(0, x, adjacent(\"CA\", x)))\n# ('AZ', 'OR', 'NV')\n\n# All coastal states next to Texas\nprint(run(0, x, adjacent(\"TX\", x), coastal(x)))\n# ('LA',)\n\n# Five states that border a coastal state\nprint(run(5, x, coastal(y), adjacent(x, y)))\n# ('LA', 'NM', 'OK', 'AR', 'RI')\n\n# All states adjacent to Tennessee and adjacent to Florida\nprint(run(0, x, adjacent(\"TN\", x), adjacent(\"FL\", x)))\n# ('AL', 'GA')\n"
  },
  {
    "path": "examples/user_classes.py",
    "content": "from operator import add, gt, sub\n\nfrom examples.account import Account\nfrom kanren import eq, membero, run, unifiable, var\nfrom kanren.core import lall\nfrom kanren.term import applyo, term  # noqa: F401\n\n\nunifiable(Account)  # Register Account class\n\naccounts = (\n    Account(\"Adam\", \"Smith\", 1, 20),\n    Account(\"Carl\", \"Marx\", 2, 3),\n    Account(\"John\", \"Rockefeller\", 3, 1000),\n)\n\n# optional name strings are helpful for debugging\nfirst = var(prefix=\"first\")\nlast = var(prefix=\"last\")\nident = var(prefix=\"ident\")\nbalance = var(prefix=\"balance\")\nnewbalance = var(prefix=\"newbalance\")\n\n# Describe a couple of transformations on accounts\nsource = Account(first, last, ident, balance)\ntarget = Account(first, last, ident, newbalance)\n\ntheorists = (\"Adam\", \"Carl\")\n# Give $10 to theorists\ntheorist_bonus = lall(\n    membero(source, accounts),\n    membero(first, theorists),\n    applyo(add, (10, balance), newbalance),\n)\n\n# Take $10 from anyone with more than $100\na = var(prefix=\"a\")\ntax_the_rich = lall(\n    membero(source, accounts),\n    applyo(gt, (balance, 100), a),\n    eq(a, True),\n    applyo(sub, (balance, 10), newbalance),\n)\n\nprint(\"Take $10 from anyone with more than $100\")\nprint(run(0, target, tax_the_rich))\n\nprint(\"Give $10 to theorists\")\nprint(run(0, target, theorist_bonus))\n"
  },
  {
    "path": "examples/zebra-puzzle.py",
    "content": "\"\"\"\nZebra puzzle as published in Life International in 1962.\nhttps://en.wikipedia.org/wiki/Zebra_Puzzle\n\"\"\"\nfrom dataclasses import dataclass, field\nfrom typing import Union\n\nfrom unification import Var, unifiable, var, vars\n\nfrom kanren import conde, eq, lall, membero, run\n\n\n@unifiable\n@dataclass\nclass House:\n    nationality: Union[str, Var] = field(default_factory=var)\n    drink: Union[str, Var] = field(default_factory=var)\n    animal: Union[str, Var] = field(default_factory=var)\n    cigarettes: Union[str, Var] = field(default_factory=var)\n    color: Union[str, Var] = field(default_factory=var)\n\n\ndef righto(right, left, houses):\n    \"\"\"Express that `right` is on the right of `left` among all the houses.\"\"\"\n    neighbors = tuple(zip(houses[:-1], houses[1:]))\n    return membero((left, right), neighbors)\n\n\ndef nexto(a, b, houses):\n    \"\"\"Express that `a` and `b` are next to each other.\"\"\"\n    return conde([righto(a, b, houses)], [righto(b, a, houses)])\n\n\n# And now for the riddle\nhouses = vars(5)\ngoals = lall(\n    membero(House(\"Englishman\", color=\"red\"), houses),\n    membero(House(\"Spaniard\", animal=\"dog\"), houses),\n    membero(House(drink=\"coffee\", color=\"green\"), houses),\n    membero(House(\"Ukrainian\", drink=\"tea\"), houses),\n    righto(House(color=\"green\"), House(color=\"ivory\"), houses),\n    membero(House(animal=\"snails\", cigarettes=\"Old Gold\"), houses),\n    membero(House(color=\"yellow\", cigarettes=\"Kools\"), houses),\n    eq(House(drink=\"milk\"), houses[2]),\n    eq(House(\"Norwegian\"), houses[0]),\n    nexto(House(cigarettes=\"Chesterfields\"), House(animal=\"fox\"), houses),\n    nexto(House(cigarettes=\"Kools\"), House(animal=\"horse\"), houses),\n    membero(House(drink=\"orange juice\", cigarettes=\"Lucky Strike\"), houses),\n    membero(House(\"Japanese\", cigarettes=\"Parliaments\"), houses),\n    nexto(House(\"Norwegian\"), House(color=\"blue\"), 
houses),\n    membero(House(drink=\"water\"), houses),\n    membero(House(animal=\"zebra\"), houses),\n)\n\n\nresults = run(0, houses, goals)\nprint(results)\n# (\n#     [\n#         House(\n#             nationality=\"Norwegian\",\n#             drink=\"water\",\n#             animal=\"fox\",\n#             cigarettes=\"Kools\",\n#             color=\"yellow\",\n#         ),\n#         House(\n#             nationality=\"Ukrainian\",\n#             drink=\"tea\",\n#             animal=\"horse\",\n#             cigarettes=\"Chesterfields\",\n#             color=\"blue\",\n#         ),\n#         House(\n#             nationality=\"Englishman\",\n#             drink=\"milk\",\n#             animal=\"snails\",\n#             cigarettes=\"Old Gold\",\n#             color=\"red\",\n#         ),\n#         House(\n#             nationality=\"Spaniard\",\n#             drink=\"orange juice\",\n#             animal=\"dog\",\n#             cigarettes=\"Lucky Strike\",\n#             color=\"ivory\",\n#         ),\n#         House(\n#             nationality=\"Japanese\",\n#             drink=\"coffee\",\n#             animal=\"zebra\",\n#             cigarettes=\"Parliaments\",\n#             color=\"green\",\n#         ),\n#     ],\n# )\n"
  },
  {
    "path": "kanren/__init__.py",
    "content": "# flake8: noqa\n\"\"\"kanren is a Python library for logic and relational programming.\"\"\"\nfrom importlib.metadata import version\n\nfrom unification import Var, isvar, reify, unifiable, unify, var, variables, vars\n\nfrom .core import conde, eq, lall, lany, run\nfrom .facts import Relation, fact, facts\nfrom .goals import (\n    appendo,\n    conso,\n    heado,\n    itero,\n    membero,\n    nullo,\n    permuteo,\n    permuteq,\n    rembero,\n    tailo,\n)\nfrom .term import arguments, operator, term, unifiable_with_term\n\n\n__version__ = version(\"miniKanren\")\n"
  },
  {
    "path": "kanren/assoccomm.py",
    "content": "\"\"\"Functions for associative and commutative unification.\n\nThis module provides goals for associative and commutative unification.  It\naccomplishes this through naively trying all possibilities.  This was built to\nbe used in the computer algebra systems SymPy and Theano.\n\n>>> from kanren import run, var, fact\n>>> from kanren.assoccomm import eq_assoccomm as eq\n>>> from kanren.assoccomm import commutative, associative\n\n>>> # Define some dummy Ops\n>>> add = 'add'\n>>> mul = 'mul'\n\n>>> # Declare that these ops are commutative using the facts system\n>>> fact(commutative, mul)\n>>> fact(commutative, add)\n>>> fact(associative, mul)\n>>> fact(associative, add)\n\n>>> # Define some wild variables\n>>> x, y = var('x'), var('y')\n\n>>> # Two expressions to match\n>>> pattern = (mul, (add, 1, x), y)                # (1 + x) * y\n>>> expr    = (mul, 2, (add, 3, 1))                # 2 * (3 + 1)\n\n>>> print(run(0, (x,y), eq(pattern, expr)))\n((3, 2),)\n\"\"\"\nfrom collections.abc import Sequence\nfrom functools import partial\nfrom operator import eq as equal\nfrom operator import length_hint\n\nfrom cons.core import ConsPair, car, cdr\nfrom etuples import etuple\nfrom toolz import sliding_window\nfrom unification import reify, unify, var\n\nfrom .core import conde, eq, ground_order, lall, succeed\nfrom .facts import Relation\nfrom .goals import itero, permuteo\nfrom .graph import term_walko\nfrom .term import term\n\n\nassociative = Relation(\"associative\")\ncommutative = Relation(\"commutative\")\n\n\ndef flatten_assoc_args(op_predicate, items):\n    for i in items:\n        if isinstance(i, ConsPair) and op_predicate(car(i)):\n            i_cdr = cdr(i)\n            if length_hint(i_cdr) > 0:\n                yield from flatten_assoc_args(op_predicate, i_cdr)\n            else:\n                yield i\n        else:\n            yield i\n\n\ndef assoc_args(rator, rands, n, ctor=None):\n    \"\"\"Produce all associative argument 
combinations of rator + rands in n-sized rand groupings.\n\n    >>> from kanren.assoccomm import assoc_args\n    >>> list(assoc_args('op', [1, 2, 3], 2))\n    [[['op', 1, 2], 3], [1, ['op', 2, 3]]]\n    \"\"\"  # noqa: E501\n    assert n > 0\n\n    rands_l = list(rands)\n\n    if ctor is None:\n        ctor = type(rands)\n\n    if n == len(rands_l):\n        yield ctor(rands)\n        return\n\n    for i, new_rands in enumerate(sliding_window(n, rands_l)):\n        prefix = rands_l[:i]\n        new_term = term(rator, ctor(new_rands))\n        suffix = rands_l[n + i :]\n        res = ctor(prefix + [new_term] + suffix)\n        yield res\n\n\ndef eq_assoc_args(\n    op, a_args, b_args, n=None, inner_eq=eq, no_ident=False, null_type=etuple\n):\n    \"\"\"Create a goal that applies associative unification to an operator and two sets of arguments.\n\n    This is a non-relational utility goal.  It does assumes that the op and at\n    least one set of arguments are ground under the state in which it is\n    evaluated.\n    \"\"\"  # noqa: E501\n    u_args, v_args = var(), var()\n\n    def eq_assoc_args_goal(S):\n        nonlocal op, u_args, v_args, n\n\n        (op_rf, u_args_rf, v_args_rf, n_rf) = reify((op, u_args, v_args, n), S)\n\n        if isinstance(v_args_rf, Sequence):\n            u_args_rf, v_args_rf = v_args_rf, u_args_rf\n\n        if isinstance(u_args_rf, Sequence) and isinstance(v_args_rf, Sequence):\n            # TODO: We just ignore `n` when both are sequences?\n\n            if type(u_args_rf) != type(v_args_rf):\n                return\n\n            if no_ident and unify(u_args_rf, v_args_rf, S) is not False:\n                return\n\n            op_pred = partial(equal, op_rf)\n            u_args_flat = type(u_args_rf)(flatten_assoc_args(op_pred, u_args_rf))\n            v_args_flat = type(v_args_rf)(flatten_assoc_args(op_pred, v_args_rf))\n\n            if len(u_args_flat) == len(v_args_flat):\n                g = inner_eq(u_args_flat, 
v_args_flat)\n            else:\n                if len(u_args_flat) < len(v_args_flat):\n                    sm_args, lg_args = u_args_flat, v_args_flat\n                else:\n                    sm_args, lg_args = v_args_flat, u_args_flat\n\n                grp_sizes = len(lg_args) - len(sm_args) + 1\n                assoc_terms = assoc_args(\n                    op_rf, lg_args, grp_sizes, ctor=type(u_args_rf)\n                )\n\n                g = conde([inner_eq(sm_args, a_args)] for a_args in assoc_terms)\n\n            yield from g(S)\n\n        elif isinstance(u_args_rf, Sequence):\n            # TODO: We really need to know the arity (ranges) for the operator\n            # in order to make good choices here.\n            # For instance, does `(op, 1, 2) == (op, (op, 1, 2))` make sense?\n            # If so, the lower-bound on this range should actually be `1`.\n            if len(u_args_rf) == 1:\n                if not no_ident and (n_rf == 1 or n_rf is None):\n                    g = inner_eq(u_args_rf, v_args_rf)\n                else:\n                    return\n            else:\n\n                u_args_flat = list(flatten_assoc_args(partial(equal, op_rf), u_args_rf))\n\n                if n_rf is not None:\n                    arg_sizes = [n_rf]\n                else:\n                    arg_sizes = range(2, len(u_args_flat) + (not no_ident))\n\n                v_ac_args = (\n                    v_ac_arg\n                    for n_i in arg_sizes\n                    for v_ac_arg in assoc_args(\n                        op_rf, u_args_flat, n_i, ctor=type(u_args_rf)\n                    )\n                    if not no_ident or v_ac_arg != u_args_rf\n                )\n                g = conde([inner_eq(v_args_rf, v_ac_arg)] for v_ac_arg in v_ac_args)\n\n            yield from g(S)\n\n    return lall(\n        ground_order((a_args, b_args), (u_args, v_args)),\n        itero(u_args, nullo_refs=(v_args,), default_ConsNull=null_type),\n        
eq_assoc_args_goal,\n    )\n\n\ndef eq_assoc(u, v, n=None, op_predicate=associative, null_type=etuple):\n    \"\"\"Create a goal for associative unification of two terms.\n\n    >>> from kanren import run, var, fact\n    >>> from kanren.assoccomm import eq_assoc as eq\n\n    >>> fact(commutative, 'add')    # declare that 'add' is commutative\n    >>> fact(associative, 'add')    # declare that 'add' is associative\n\n    >>> x = var()\n    >>> run(0, x, eq(('add', 1, 2, 3), ('add', 1, x)))\n    (('add', 2, 3),)\n    \"\"\"\n\n    def assoc_args_unique(a, b, op, **kwargs):\n        return eq_assoc_args(op, a, b, no_ident=True, null_type=null_type)\n\n    return term_walko(op_predicate, assoc_args_unique, u, v, n=n)\n\n\ndef eq_comm(u, v, op_predicate=commutative, null_type=etuple):\n    \"\"\"Create a goal for commutative equality.\n\n    >>> from kanren import run, var, fact\n    >>> from kanren.assoccomm import eq_comm as eq\n    >>> from kanren.assoccomm import commutative, associative\n\n    >>> fact(commutative, 'add')    # declare that 'add' is commutative\n    >>> fact(associative, 'add')    # declare that 'add' is associative\n\n    >>> x = var()\n    >>> run(0, x, eq(('add', 1, 2, 3), ('add', 2, x, 1)))\n    (3,)\n    \"\"\"\n\n    def permuteo_unique(x, y, op, **kwargs):\n        return permuteo(x, y, no_ident=True, default_ConsNull=null_type)\n\n    return term_walko(op_predicate, permuteo_unique, u, v)\n\n\ndef assoc_flatten(a, a_flat):\n    def assoc_flatten_goal(S):\n        nonlocal a, a_flat\n\n        a_rf = reify(a, S)\n\n        if isinstance(a_rf, Sequence) and (a_rf[0],) in associative.facts:\n\n            def op_pred(sub_op):\n                nonlocal S\n                sub_op_rf = reify(sub_op, S)\n                return sub_op_rf == a_rf[0]\n\n            a_flat_rf = type(a_rf)(flatten_assoc_args(op_pred, a_rf))\n        else:\n            a_flat_rf = a_rf\n\n        yield from eq(a_flat, a_flat_rf)(S)\n\n    return 
assoc_flatten_goal\n\n\ndef eq_assoccomm(u, v, null_type=etuple):\n    \"\"\"Construct a goal for associative and commutative unification.\n\n    >>> from kanren.assoccomm import eq_assoccomm as eq\n    >>> from kanren.assoccomm import commutative, associative\n    >>> from kanren import fact, run, var\n\n    >>> fact(commutative, 'add')    # declare that 'add' is commutative\n    >>> fact(associative, 'add')    # declare that 'add' is associative\n\n    >>> x = var()\n    >>> e1 = ('add', 1, 2, 3)\n    >>> e2 = ('add', 1, x)\n    >>> run(0, x, eq(e1, e2))\n    (('add', 3, 2), ('add', 2, 3))\n    \"\"\"\n\n    def eq_assoccomm_step(a, b, op):\n        z = var()\n        return lall(\n            # Permute\n            conde(\n                [\n                    commutative(op),\n                    permuteo(a, z, no_ident=True, default_ConsNull=etuple),\n                ],\n                [eq(a, z)],\n            ),\n            # Generate associative combinations\n            conde(\n                [associative(op), eq_assoc_args(op, z, b, no_ident=True)], [eq(z, b)]\n            ),\n        )\n\n    return term_walko(\n        lambda x: succeed,\n        eq_assoccomm_step,\n        u,\n        v,\n        format_step=assoc_flatten,\n        no_ident=False,\n    )\n"
  },
  {
    "path": "kanren/constraints.py",
    "content": "import weakref\nfrom abc import ABC, abstractmethod\nfrom collections import UserDict\nfrom collections.abc import Mapping\nfrom typing import Optional\n\nfrom cons.core import ConsPair\nfrom toolz import groupby\nfrom unification import Var, reify, unify, var\nfrom unification.core import _reify, isground\nfrom unification.utils import transitive_get as walk\n\nfrom .util import FlexibleSet\n\n\nclass ConstraintStore(ABC):\n    \"\"\"A class that enforces constraints between logic variables in a miniKanren state.\n\n    Attributes\n    ----------\n    lvar_constraints: MutableMapping\n        A mapping of logic variables to sets of objects that define their\n        constraints (e.g. a set of items with which the logic variable cannot\n        be unified).  The mapping's values are entirely determined by the\n        ConstraintStore implementation.\n\n    \"\"\"\n\n    __slots__ = (\"lvar_constraints\",)\n    op_str: Optional[str] = None\n\n    def __init__(self, lvar_constraints=None):\n        # self.lvar_constraints = weakref.WeakKeyDictionary(lvar_constraints)\n        self.lvar_constraints = lvar_constraints or dict()\n\n    @abstractmethod\n    def pre_unify_check(self, lvar_map, lvar=None, value=None):\n        \"\"\"Check a key-value pair before they're added to a ConstrainedState.\"\"\"\n        raise NotImplementedError()\n\n    @abstractmethod\n    def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=None):\n        \"\"\"Check a key-value pair after they're added to a ConstrainedState.\n\n        XXX: This method may alter the internal constraints, so make a copy!\n        \"\"\"\n        raise NotImplementedError()\n\n    def add(self, lvar, lvar_constraint, **kwargs):\n        \"\"\"Add a new constraint.\"\"\"\n        if lvar not in self.lvar_constraints:\n            self.lvar_constraints[lvar] = FlexibleSet([lvar_constraint])\n        else:\n            self.lvar_constraints[lvar].add(lvar_constraint)\n\n    def 
constraints_str(self, lvar):\n        \"\"\"Print the constraints on a logic variable.\"\"\"\n        if lvar in self.lvar_constraints:\n            return f\"{self.op_str} {self.lvar_constraints[lvar]}\"\n        else:\n            return \"\"\n\n    def copy(self):\n        return type(self)(\n            lvar_constraints={k: v.copy() for k, v in self.lvar_constraints.items()},\n        )\n\n    def __contains__(self, lvar):\n        return lvar in self.lvar_constraints\n\n    def __eq__(self, other):\n        return (\n            type(self) == type(other)\n            and self.op_str == other.op_str\n            and self.lvar_constraints == other.lvar_constraints\n        )\n\n    def __repr__(self):\n        return f\"ConstraintStore({self.op_str}: {self.lvar_constraints})\"\n\n\nclass ConstrainedState(UserDict):\n    \"\"\"A miniKanren state that holds unifications of logic variables and upholds constraints on logic variables.\"\"\"  # noqa: E501\n\n    __slots__ = (\"constraints\",)\n\n    def __init__(self, *s, constraints=None):\n        super().__init__(*s)\n        self.constraints = dict(constraints or [])\n\n    def pre_unify_checks(self, lvar, value):\n        \"\"\"Check the constraints before unification.\"\"\"\n        return all(\n            cstore.pre_unify_check(self.data, lvar, value)\n            for cstore in self.constraints.values()\n        )\n\n    def post_unify_checks(self, lvar_map, lvar, value):\n        \"\"\"Check constraints and return an updated state and constraints.\n\n        Returns\n        -------\n        A new `ConstrainedState` and `False`.\n\n        \"\"\"\n        S = self.copy(data=lvar_map)\n        if any(\n            not cstore.post_unify_check(lvar_map, lvar, value, old_state=S)\n            for cstore in S.constraints.values()\n        ):\n            return False\n\n        return S\n\n    def copy(self, data=None):\n        if data is None:\n            data = self.data.copy()\n        return type(self)(\n    
        data, constraints={k: v.copy() for k, v in self.constraints.items()}\n        )\n\n    def __eq__(self, other):\n        if isinstance(other, ConstrainedState):\n            return self.data == other.data and self.constraints == other.constraints\n\n        if isinstance(other, Mapping) and not self.constraints:\n            return self.data == other\n\n        return False\n\n    def __repr__(self):\n        return f\"ConstrainedState({repr(self.data)}, {self.constraints})\"\n\n\ndef unify_ConstrainedState(u, v, S):\n    if S.pre_unify_checks(u, v):\n        s = unify(u, v, S.data)\n        if s is not False:\n            S = S.post_unify_checks(s, u, v)\n            if S is not False:\n                return S\n\n    return False\n\n\nunify.add((object, object, ConstrainedState), unify_ConstrainedState)\n\n\nclass ConstrainedVar(Var):\n    \"\"\"A logic variable that tracks its own constraints.\n\n    Currently, this is only for display/reification purposes.\n\n    \"\"\"\n\n    __slots__ = (\"S\", \"var\")\n\n    def __init__(self, var, S):\n        self.S = weakref.ref(S)\n        self.token = var.token\n        self.var = weakref.ref(var)\n\n    def __repr__(self):\n        S = self.S()\n        var = self.var()\n        res = super().__repr__()\n        if S is not None and var is not None:\n            u_constraints = \",\".join(\n                [c.constraints_str(var) for c in S.constraints.values()]\n            )\n            return f\"{res}: {{{u_constraints}}}\"\n        else:\n            return res\n\n    def __eq__(self, other):\n        if type(other) == type(self):\n            return self.S == other.S and self.token == other.token\n        elif type(other) == Var:\n            # NOTE: A more valid comparison is same token and no constraints.\n            return self.token == other.token\n        return NotImplemented\n\n    def __hash__(self):\n        return hash((Var, self.token))\n\n\ndef _reify_ConstrainedState(u, S):\n    u_res = 
walk(u, S.data)\n\n    if u_res is u:\n        yield ConstrainedVar(u_res, S)\n    else:\n        yield _reify(u_res, S)\n\n\n_reify.add((Var, ConstrainedState), _reify_ConstrainedState)\n\n\nclass DisequalityStore(ConstraintStore):\n    \"\"\"A disequality constraint (i.e. two things do not unify).\"\"\"\n\n    op_str = \"neq\"\n\n    def __init__(self, lvar_constraints=None):\n        super().__init__(lvar_constraints)\n\n    def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=None):\n\n        for lv_key, constraints in list(self.lvar_constraints.items()):\n            lv = reify(lv_key, lvar_map)\n            constraints_rf = reify(tuple(constraints), lvar_map)\n\n            for cs in constraints_rf:\n                s = unify(lv, cs, {})\n\n                if s is not False and not s:\n                    # They already unify, but with no unground logic variables,\n                    # so we have an immediate violation of the constraint.\n                    return False\n                elif s is False:\n                    # They don't unify and have no unground logic variables, so\n                    # the constraint is immediately satisfied and there's no\n                    # reason to continue checking this constraint.\n                    constraints.discard(cs)\n                else:\n                    # They unify when/if the unifications in `s` are made, so\n                    # let's add these as new constraints.\n                    for k, v in s.items():\n                        self.add(k, v)\n\n            if len(constraints) == 0:\n                # This logic variable has no more unground constraints, so\n                # remove it.\n                del self.lvar_constraints[lv_key]\n\n        return True\n\n    def pre_unify_check(self, lvar_map, lvar=None, value=None):\n        return True\n\n\ndef neq(u, v):\n    \"\"\"Construct a disequality goal.\"\"\"\n\n    def neq_goal(S):\n        nonlocal u, v\n\n        
u_rf, v_rf = reify((u, v), S)\n\n        # Get the unground logic variables that would unify the two objects;\n        # these are all the logic variables that we can't let unify.\n        s_uv = unify(u_rf, v_rf, {})\n\n        if s_uv is False:\n            # They don't unify and have no unground logic variables, so the\n            # constraint is immediately satisfied.\n            yield S\n            return\n        elif not s_uv:\n            # They already unify, but with no unground logic variables, so we\n            # have an immediate violation of the constraint.\n            return\n\n        if not isinstance(S, ConstrainedState):\n            S = ConstrainedState(S)\n\n        cs = S.constraints.setdefault(DisequalityStore, DisequalityStore())\n\n        for lvar, obj in s_uv.items():\n            cs.add(lvar, obj)\n\n        # We need to check the current state for validity.\n        if cs.post_unify_check(S.data):\n            yield S\n\n    return neq_goal\n\n\nclass PredicateStore(ConstraintStore, ABC):\n    \"\"\"An abstract store for testing simple predicates.\"\"\"\n\n    # Require that all constraints be satisfied for a term; otherwise, succeed\n    # if only one is satisfied.\n    require_all_constraints = True\n\n    # @abstractmethod\n    # def cterm_type_check(self, lvt):\n    #     \"\"\"Check the type of the constrained term when it's ground.\"\"\"\n    #     raise NotImplementedError()\n\n    @abstractmethod\n    def cparam_type_check(self, lvt):\n        \"\"\"Check the type of the constraint parameter when it's ground.\"\"\"\n        raise NotImplementedError()\n\n    @abstractmethod\n    def constraint_check(self, lv, lvt):\n        \"\"\"Check the constrained term against the constraint parameters when they're ground.\n\n        I.e. 
test the constraint.\n        \"\"\"\n        raise NotImplementedError()\n\n    @abstractmethod\n    def constraint_isground(self, lv, lvar_map):\n        \"\"\"Check whether or not the constrained term is \"ground enough\" to be checked.\"\"\"  # noqa: E501\n        raise NotImplementedError()\n\n    def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=None):\n\n        for lv_key, constraints in list(self.lvar_constraints.items()):\n\n            lv = reify(lv_key, lvar_map)\n\n            is_lv_ground = self.constraint_isground(lv, lvar_map) or isground(\n                lv, lvar_map\n            )\n\n            if not is_lv_ground:\n                # This constraint isn't ready to be checked\n                continue\n\n            # if is_lv_ground and not self.cterm_type_check(lv):\n            #     self.lvar_constraints[lv_key]\n            #     return False\n\n            constraint_grps = groupby(\n                lambda x: isground(x, lvar_map), reify(iter(constraints), lvar_map)\n            )\n\n            constraints_unground = constraint_grps.get(False, ())\n            constraints_ground = constraint_grps.get(True, ())\n\n            if len(constraints_ground) > 0 and not all(\n                self.cparam_type_check(c) for c in constraints_ground\n            ):\n                # Some constraint parameters aren't the correct type, so fail.\n                # del self.lvar_constraints[lv_key]\n                return False\n\n            assert constraints_unground or constraints_ground\n\n            if is_lv_ground and len(constraints_unground) == 0:\n\n                if self.require_all_constraints and any(\n                    not self.constraint_check(lv, t) for t in constraints_ground\n                ):\n                    return False\n                elif not self.require_all_constraints and not any(\n                    self.constraint_check(lv, t) for t in constraints_ground\n                ):\n                    
return False\n\n                # The instance and constraint parameters are all ground and the\n                # constraint is satisfied, so, since nothing should change from\n                # here on, we can remove the constraint.\n\n                del self.lvar_constraints[lv_key]\n\n        # Some types are unground, so we continue checking until they are\n        return True\n\n    def pre_unify_check(self, lvar_map, lvar=None, value=None):\n        return True\n\n\nclass TypeStore(PredicateStore):\n    \"\"\"A constraint store for asserting object types.\"\"\"\n\n    require_all_constraints = True\n\n    op_str = \"typeo\"\n\n    def __init__(self, lvar_constraints=None):\n        super().__init__(lvar_constraints)\n\n    def add(self, lvt, cparams):\n        if lvt in self.lvar_constraints:\n            raise ValueError(\"Only one type constraint can be applied to a term\")\n\n        return super().add(lvt, cparams)\n\n    # def cterm_type_check(self, lvt):\n    #     return True\n\n    def cparam_type_check(self, x):\n        return isinstance(x, type)\n\n    def constraint_check(self, x, cx):\n        return type(x) == cx\n\n    def constraint_isground(self, lv, lvar_map):\n        return not (isinstance(lv, Var) or issubclass(type(lv), ConsPair))\n\n\ndef typeo(u, u_type):\n    \"\"\"Construct a goal specifying the type of a term.\"\"\"\n\n    def typeo_goal(S):\n        nonlocal u, u_type\n\n        u_rf, u_type_rf = reify((u, u_type), S)\n\n        if not isground(u_rf, S) or not isground(u_type_rf, S):\n\n            if not isinstance(S, ConstrainedState):\n                S = ConstrainedState(S)\n\n            cs = S.constraints.setdefault(TypeStore, TypeStore())\n\n            try:\n                cs.add(u_rf, u_type_rf)\n            except TypeError:\n                # If the instance object can't be hashed, we can simply use a\n                # logic variable to uniquely identify it.\n                u_lv = var()\n                S[u_lv] = 
u_rf\n                cs.add(u_lv, u_type_rf)\n\n            if cs.post_unify_check(S.data, u_rf, u_type_rf):\n                yield S\n\n        elif isinstance(u_type_rf, type) and type(u_rf) == u_type_rf:\n            yield S\n\n    return typeo_goal\n\n\nclass IsinstanceStore(PredicateStore):\n    \"\"\"A constraint store for asserting object instance types.\"\"\"\n\n    op_str = \"isinstanceo\"\n\n    # Satisfying any one constraint is good enough\n    require_all_constraints = False\n\n    def __init__(self, lvar_constraints=None):\n        super().__init__(lvar_constraints)\n\n    # def cterm_type_check(self, lvt):\n    #     return True\n\n    def cparam_type_check(self, lvt):\n        return isinstance(lvt, type)\n\n    def constraint_check(self, lv, lvt):\n        return isinstance(lv, lvt)\n\n    def constraint_isground(self, lv, lvar_map):\n        return not (isinstance(lv, Var) or issubclass(type(lv), ConsPair))\n\n\ndef isinstanceo(u, u_type):\n    \"\"\"Construct a goal specifying that a term is an instance of a type.\n\n    Only a single instance type can be assigned per goal, i.e.\n\n        lany(isinstanceo(var(), list),\n             isinstanceo(var(), tuple))\n\n    and not\n\n        isinstanceo(var(), (list, tuple))\n\n    \"\"\"\n\n    def isinstanceo_goal(S):\n        nonlocal u, u_type\n\n        u_rf, u_type_rf = reify((u, u_type), S)\n\n        if not isground(u_rf, S) or not isground(u_type_rf, S):\n\n            if not isinstance(S, ConstrainedState):\n                S = ConstrainedState(S)\n\n            cs = S.constraints.setdefault(IsinstanceStore, IsinstanceStore())\n\n            try:\n                cs.add(u_rf, u_type_rf)\n            except TypeError:\n                # If the instance object can't be hashed, we can simply use a\n                # logic variable to uniquely identify it.\n                u_lv = var()\n                S[u_lv] = u_rf\n                cs.add(u_lv, u_type_rf)\n\n            if 
cs.post_unify_check(S.data, u_rf, u_type_rf):\n                yield S\n\n        # elif isground(u_type, S):\n        #     yield from lany(eq(u_type, u_t) for u_t in type(u).mro())(S)\n        elif (\n            isinstance(u_type_rf, type)\n            # or (\n            #     isinstance(u_type, Iterable)\n            #     and all(isinstance(t, type) for t in u_type)\n            # )\n        ) and isinstance(u_rf, u_type_rf):\n            yield S\n\n    return isinstanceo_goal\n"
  },
  {
    "path": "kanren/core.py",
    "content": "from collections.abc import Sequence\nfrom functools import partial, reduce\nfrom itertools import tee\nfrom operator import length_hint\nfrom typing import (\n    Any,\n    Callable,\n    Iterable,\n    Iterator,\n    Literal,\n    MutableMapping,\n    Optional,\n    Tuple,\n    Union,\n    cast,\n)\n\nfrom cons.core import ConsPair\nfrom toolz import interleave, take\nfrom unification import isvar, reify, unify\nfrom unification.core import isground\n\n\nStateType = Union[MutableMapping, Literal[False]]\nStateStreamType = Iterator[StateType]\nGoalType = Callable[[StateType], StateStreamType]\n\n\ndef fail(s: StateType) -> Iterator[StateType]:\n    return iter(())\n\n\ndef succeed(s: StateType) -> Iterator[StateType]:\n    return iter((s,))\n\n\ndef eq(u: Any, v: Any) -> GoalType:\n    \"\"\"Construct a goal stating that its arguments must unify.\n\n    See Also\n    --------\n        unify\n    \"\"\"\n\n    def eq_goal(s: StateType) -> StateStreamType:\n        s = unify(u, v, s)\n        if s is not False:\n            return iter((s,))\n        else:\n            return iter(())\n\n    return eq_goal\n\n\ndef ldisj_seq(goals: Iterable[GoalType]) -> GoalType:\n    \"\"\"Produce a goal that returns the appended state stream from all successful goal arguments.\n\n    In other words, it behaves like logical disjunction/OR for goals.\n    \"\"\"  # noqa: E501\n\n    if length_hint(goals, -1) == 0:\n        return succeed\n\n    assert isinstance(goals, Iterable)\n\n    def ldisj_seq_goal(S: StateType) -> StateStreamType:\n        nonlocal goals\n\n        goals, _goals = tee(goals)\n\n        yield from interleave(g(S) for g in _goals)\n\n    return ldisj_seq_goal\n\n\ndef bind(z: StateStreamType, g: GoalType) -> StateStreamType:\n    \"\"\"Apply a goal to a state stream and then combine the resulting state streams.\"\"\"\n    # We could also use `chain`, but `interleave` preserves the old behavior.\n    # return chain.from_iterable(map(g, z))\n    
return cast(StateStreamType, interleave(map(g, z)))\n\n\ndef lconj_seq(goals: Iterable[GoalType]) -> GoalType:\n    \"\"\"Produce a goal that returns the appended state stream in which all goals are necessarily successful.\n\n    In other words, it behaves like logical conjunction/AND for goals.\n    \"\"\"  # noqa: E501\n\n    if length_hint(goals, -1) == 0:\n        return succeed\n\n    assert isinstance(goals, Iterable)\n\n    def lconj_seq_goal(S: StateType) -> StateStreamType:\n        nonlocal goals\n\n        goals, _goals = tee(goals)\n\n        g0 = next(_goals, None)\n\n        if g0 is None:\n            return\n\n        yield from reduce(bind, _goals, g0(S))\n\n    return lconj_seq_goal\n\n\ndef ldisj(*goals: Union[GoalType, Iterable[GoalType]]) -> GoalType:\n    \"\"\"Form a disjunction of goals.\"\"\"\n    if len(goals) == 1 and isinstance(goals[0], Iterable):\n        return ldisj_seq(goals[0])\n\n    return ldisj_seq(cast(Tuple[GoalType, ...], goals))\n\n\ndef lconj(*goals: Union[GoalType, Iterable[GoalType]]) -> GoalType:\n    \"\"\"Form a conjunction of goals.\"\"\"\n    if len(goals) == 1 and isinstance(goals[0], Iterable):\n        return lconj_seq(goals[0])\n\n    return lconj_seq(cast(Tuple[GoalType, ...], goals))\n\n\ndef conde(\n    *goals: Union[Iterable[GoalType], Iterator[Iterable[GoalType]]]\n) -> Union[GoalType, StateStreamType]:\n    \"\"\"Form a disjunction of goal conjunctions.\"\"\"\n    if len(goals) == 1 and isinstance(goals[0], Iterator):\n        return ldisj_seq(\n            lconj_seq(g) for g in cast(Iterator[Iterable[GoalType]], goals[0])\n        )\n\n    return ldisj_seq(lconj_seq(g) for g in cast(Tuple[Iterable[GoalType], ...], goals))\n\n\nlall = lconj\nlany = ldisj\n\n\ndef ground_order_key(S: StateType, x: Any) -> Literal[-1, 0, 1, 2]:\n    if isvar(x):\n        return 2\n    elif isground(x, S):\n        return -1\n    elif issubclass(type(x), ConsPair):\n        return 1\n    else:\n        return 0\n\n\ndef 
ground_order(in_args: Any, out_args: Any) -> GoalType:\n    \"\"\"Construct a non-relational goal that orders a list of terms based on groundedness (grounded precede ungrounded).\"\"\"  # noqa: E501\n\n    def ground_order_goal(S: StateType) -> StateStreamType:\n        nonlocal in_args, out_args\n\n        in_args_rf, out_args_rf = reify((in_args, out_args), S)\n\n        S_new = unify(\n            list(out_args_rf) if isinstance(out_args_rf, Sequence) else out_args_rf,\n            sorted(in_args_rf, key=partial(ground_order_key, S)),\n            S,\n        )\n\n        if S_new is not False:\n            yield S_new\n\n    return ground_order_goal\n\n\ndef ifa(g1: GoalType, g2: GoalType) -> GoalType:\n    \"\"\"Create a goal operator that returns the first stream unless it fails.\"\"\"\n\n    def ifa_goal(S: StateType) -> StateStreamType:\n        g1_stream = g1(S)\n        S_new = next(g1_stream, None)\n\n        if S_new is None:\n            yield from g2(S)\n        else:\n            yield S_new\n            yield from g1_stream\n\n    return ifa_goal\n\n\ndef Zzz(gctor: Callable[[Any], GoalType], *args, **kwargs) -> GoalType:\n    \"\"\"Create an inverse-η-delay for a goal.\"\"\"\n\n    def Zzz_goal(S: StateType) -> StateStreamType:\n        yield from gctor(*args, **kwargs)(S)\n\n    return Zzz_goal\n\n\ndef run(\n    n: Union[None, int],\n    x: Any,\n    *goals: GoalType,\n    results_filter: Optional[Callable[[Iterator[Any]], Any]] = None\n) -> Union[Tuple[Any, ...], Iterator[Any]]:\n    \"\"\"Run a logic program and obtain `n` solutions that satisfy the given goals.\n\n    >>> from kanren import run, var, eq\n    >>> x = var()\n    >>> run(1, x, eq(x, 1))\n    (1,)\n\n    Parameters\n    ----------\n    n\n        The number of desired solutions. ``n=0`` returns a tuple with all\n        results and ``n=None`` returns a lazy sequence of all results.\n    x\n        The form to reify and return.  
Usually contains logic variables used in\n        the given goals.\n    goals\n        A sequence of goals that must be true in logical conjunction\n        (i.e. `lall`).\n    results_filter\n        A function to apply to the results stream (e.g. a `unique` filter).\n\n    Returns\n    -------\n    Either an iterable or tuple of reified `x` values that satisfy the goals.\n\n    \"\"\"\n    g = lall(*goals)\n    results = map(partial(reify, x), g({}))\n\n    if results_filter is not None:\n        results = results_filter(results)\n\n    if n is None:\n        return results\n    elif n == 0:\n        return tuple(results)\n    else:\n        return tuple(take(n, results))\n\n\ndef dbgo(*args: Any, msg: Optional[Any] = None) -> GoalType:  # pragma: no cover\n    \"\"\"Construct a goal that sets a debug trace and prints reified arguments.\"\"\"\n    from pprint import pprint\n\n    def dbgo_goal(S: StateType) -> StateStreamType:\n        nonlocal args\n        args = reify(args, S)\n\n        if msg is not None:\n            print(msg)\n\n        pprint(args)\n\n        import pdb\n\n        pdb.set_trace()\n        yield S\n\n    return dbgo_goal\n"
  },
  {
    "path": "kanren/facts.py",
    "content": "from toolz import merge\nfrom unification import reify, unify\n\nfrom .util import intersection\n\n\nclass Relation(object):\n    _id = 0\n\n    def __init__(self, name=None):\n        self.facts = set()\n        self.index = dict()\n        if not name:\n            name = \"_%d\" % Relation._id\n            Relation._id += 1\n        self.name = name\n\n    def add_fact(self, *inputs):\n        \"\"\"Add a fact to the knowledge-base.\n\n        See Also\n        --------\n            fact\n            facts\n        \"\"\"\n        fact = tuple(inputs)\n\n        self.facts.add(fact)\n\n        for key in enumerate(inputs):\n            if key not in self.index:\n                self.index[key] = set()\n            self.index[key].add(fact)\n\n    def __call__(self, *args):\n        \"\"\"Return a goal that produces a list of substitutions matching a fact in the knowledge-base.\n\n        >>> from kanren.facts import Relation\n        >>> from unification import var\n        >>>\n        >>> x, y = var('x'), var('y')\n        >>> r = Relation()\n        >>> r.add_fact(1, 2, 3)\n        >>> r.add_fact(4, 5, 6)\n        >>> list(r(x, y, 3)({})) == [{y: 2, x: 1}]\n        True\n        >>> list(r(x, 5, y)({})) == [{y: 6, x: 4}]\n        True\n        >>> list(r(x, 42, y)({}))\n        []\n\n        Parameters\n        ----------\n        *args:\n            The goal to evaluate. 
This consists of vars and values to match\n            facts against.\n\n        \"\"\"  # noqa: E501\n\n        def goal(substitution):\n            args2 = reify(args, substitution)\n            subsets = [self.index[key] for key in enumerate(args) if key in self.index]\n            if subsets:  # we are able to reduce the pool early\n                facts = intersection(*sorted(subsets, key=len))\n            else:\n                facts = self.facts\n\n            for fact in facts:\n                unified = unify(fact, args2, substitution)\n                if unified != False:\n                    yield merge(unified, substitution)\n\n        return goal\n\n    def __str__(self):\n        return f\"Rel: {self.name}\"\n\n    def __repr__(self):\n        return f\"{type(self).__name__}({self.name}, {self.index}, {self.facts})\"\n\n\ndef fact(rel, *args):\n    \"\"\"Declare a fact.\n\n    >>> from kanren import fact, Relation, var, run\n    >>> parent = Relation()\n    >>> fact(parent, \"Homer\", \"Bart\")\n    >>> fact(parent, \"Homer\", \"Lisa\")\n\n    >>> x = var()\n    >>> run(1, x, parent(x, \"Bart\"))\n    ('Homer',)\n    \"\"\"\n    rel.add_fact(*args)\n\n\ndef facts(rel, *lists):\n    \"\"\"Declare several facts.\n\n    >>> from kanren import fact, Relation, var, run\n    >>> parent = Relation()\n    >>> facts(parent,  (\"Homer\", \"Bart\"),\n    ...                (\"Homer\", \"Lisa\"))\n\n    >>> x = var()\n    >>> run(1, x, parent(x, \"Bart\"))\n    ('Homer',)\n    \"\"\"\n    for lst in lists:\n        fact(rel, *lst)\n"
  },
  {
    "path": "kanren/goals.py",
    "content": "from collections import Counter\nfrom collections.abc import Sequence\nfrom functools import partial\nfrom itertools import permutations\nfrom operator import length_hint\n\nfrom cons import cons\nfrom cons.core import ConsNull, ConsPair\nfrom unification import reify, var\nfrom unification.core import isground\n\nfrom .core import conde, eq, lall, lany\n\n\ndef heado(head, coll):\n    \"\"\"Construct a goal stating that head is the head of coll.\n\n    See Also\n    --------\n        tailo\n        conso\n    \"\"\"\n    return eq(cons(head, var()), coll)\n\n\ndef tailo(tail, coll):\n    \"\"\"Construct a goal stating that tail is the tail of coll.\n\n    See Also\n    --------\n        heado\n        conso\n    \"\"\"\n    return eq(cons(var(), tail), coll)\n\n\ndef conso(h, t, r):\n    \"\"\"Construct a goal stating that cons h + t == r.\"\"\"\n    return eq(cons(h, t), r)\n\n\ndef nullo(*args, refs=None, default_ConsNull=list):\n    \"\"\"Create a goal asserting that one or more terms are a/the same `ConsNull` type.\n\n    `ConsNull` types return proper Python collections when used as a CDR value\n    in a CONS (e.g. `cons(1, []) == [1]`).\n\n    This goal doesn't require that all args be unifiable; only that they have\n    the same `ConsNull` type.  Unlike the classic `lall(eq(x, []), eq(y, x))`\n    `conde`-branch idiom used when recursively walking a single sequence via\n    `conso`, this allows us to perform the same essential function while\n    walking distinct lists that do not necessarily terminate on the same\n    iteration.\n\n    Parameters\n    ----------\n    args: tuple of objects\n        The terms to consider as an instance of the `ConsNull` type\n    refs: tuple of objects\n        The terms to use as reference types.  
These are not unified with the\n        `ConsNull` type, instead they are used to constrain the `ConsNull`\n        types considered valid.\n    default_ConsNull: type\n        The sequence type to use when all logic variables are unground.\n\n    \"\"\"\n\n    def nullo_goal(s):\n\n        nonlocal args, default_ConsNull\n\n        if refs is not None:\n            refs_rf = reify(refs, s)\n        else:\n            refs_rf = ()\n\n        args_rf = reify(args, s)\n\n        arg_null_types = set(\n            # Get an empty instance of the type\n            type(a)\n            for a in args_rf + refs_rf\n            # `ConsPair` and `ConsNull` types that are not literally `ConsPair`s\n            if isinstance(a, (ConsPair, ConsNull)) and not issubclass(type(a), ConsPair)\n        )\n\n        try:\n            null_type = arg_null_types.pop()\n        except KeyError:\n            null_type = default_ConsNull\n\n        if len(arg_null_types) > 0 and any(a != null_type for a in arg_null_types):\n            # Mismatching null types: fail.\n            return\n\n        g = lall(*[eq(a, null_type()) for a in args_rf])\n\n        yield from g(s)\n\n    return nullo_goal\n\n\ndef itero(lst, nullo_refs=None, default_ConsNull=list):\n    \"\"\"Construct a goal asserting that a term is an iterable type.\n\n    This is a generic version of the standard `listo` that accounts for\n    different iterable types supported by `cons` in Python.\n\n    See `nullo`\n    \"\"\"\n\n    def itero_goal(S):\n        nonlocal lst, nullo_refs, default_ConsNull\n        l_rf = reify(lst, S)\n        c, d = var(), var()\n        g = conde(\n            [nullo(l_rf, refs=nullo_refs, default_ConsNull=default_ConsNull)],\n            [conso(c, d, l_rf), itero(d, default_ConsNull=default_ConsNull)],\n        )\n        yield from g(S)\n\n    return itero_goal\n\n\ndef membero(x, ls):\n    \"\"\"Construct a goal stating that x is an item of coll.\"\"\"\n\n    def membero_goal(S):\n        
nonlocal x, ls\n\n        x_rf, ls_rf = reify((x, ls), S)\n        a, d = var(), var()\n\n        g = lall(conso(a, d, ls), conde([eq(a, x)], [membero(x, d)]))\n\n        yield from g(S)\n\n    return membero_goal\n\n\ndef appendo(lst, s, out, default_ConsNull=list):\n    \"\"\"Construct a goal for the relation lst + s = ls.\n\n    See Byrd thesis pg. 247\n    https://scholarworks.iu.edu/dspace/bitstream/handle/2022/8777/Byrd_indiana_0093A_10344.pdf\n    \"\"\"\n\n    def appendo_goal(S):\n        nonlocal lst, s, out\n\n        l_rf, s_rf, out_rf = reify((lst, s, out), S)\n\n        a, d, res = var(prefix=\"a\"), var(prefix=\"d\"), var(prefix=\"res\")\n\n        _nullo = partial(nullo, default_ConsNull=default_ConsNull)\n\n        g = conde(\n            [\n                # All empty\n                _nullo(s_rf, l_rf, out_rf),\n            ],\n            [\n                # `lst` is empty\n                conso(a, d, out_rf),\n                eq(s_rf, out_rf),\n                _nullo(l_rf, refs=(s_rf, out_rf)),\n            ],\n            [\n                conso(a, d, l_rf),\n                conso(a, res, out_rf),\n                appendo(d, s_rf, res, default_ConsNull=default_ConsNull),\n            ],\n        )\n\n        yield from g(S)\n\n    return appendo_goal\n\n\ndef rembero(x, lst, o, default_ConsNull=list):\n    \"\"\"Remove the first occurrence of `x` in `lst` resulting in `o`.\"\"\"\n\n    from .constraints import neq\n\n    def rembero_goal(s):\n        nonlocal x, lst, o\n\n        x_rf, l_rf, o_rf = reify((x, lst, o), s)\n\n        l_car, l_cdr, r = var(), var(), var()\n\n        g = conde(\n            [\n                nullo(l_rf, o_rf, default_ConsNull=default_ConsNull),\n            ],\n            [\n                conso(l_car, l_cdr, l_rf),\n                eq(x_rf, l_car),\n                eq(l_cdr, o_rf),\n            ],\n            [\n                conso(l_car, l_cdr, l_rf),\n                neq(l_car, x),\n                
conso(l_car, r, o_rf),\n                rembero(x_rf, l_cdr, r, default_ConsNull=default_ConsNull),\n            ],\n        )\n\n        yield from g(s)\n\n    return rembero_goal\n\n\ndef permuteo(a, b, inner_eq=eq, default_ConsNull=list, no_ident=False):\n    \"\"\"Construct a goal asserting equality of sequences under permutation.\n\n    For example, (1, 2, 2) equates to (2, 1, 2) under permutation\n    >>> from kanren import var, run, permuteo\n    >>> x = var()\n    >>> run(0, x, permuteo(x, (1, 2)))\n    ((1, 2), (2, 1))\n\n    >>> run(0, x, permuteo((2, 1, x), (2, 1, 2)))\n    (2,)\n    \"\"\"\n\n    def permuteo_goal(S):\n        nonlocal a, b, default_ConsNull, inner_eq\n\n        a_rf, b_rf = reify((a, b), S)\n\n        # If the lengths differ, then fail\n        a_len, b_len = length_hint(a_rf, -1), length_hint(b_rf, -1)\n        if a_len > 0 and b_len > 0 and a_len != b_len:\n            return\n\n        if isinstance(a_rf, Sequence):\n\n            a_type = type(a_rf)\n\n            a_perms = permutations(a_rf)\n\n            if no_ident:\n                next(a_perms)\n\n            if isinstance(b_rf, Sequence):\n\n                b_type = type(b_rf)\n\n                # Fail on mismatched types or straight equality (when\n                # `no_ident` is enabled)\n                if a_type != b_type or (no_ident and a_rf == b_rf):\n                    return\n\n                try:\n                    # `a` and `b` are sequences, so let's see if we can pull out\n                    # all the (hash-)equivalent elements.\n                    # XXX: Use of this requires that the equivalence relation\n                    # implied by `inner_eq` be a *superset* of `eq`.\n\n                    cntr_a, cntr_b = Counter(a_rf), Counter(b_rf)\n                    rdcd_a, rdcd_b = cntr_a - cntr_b, cntr_b - cntr_a\n\n                    if len(rdcd_a) == len(rdcd_b) == 0:\n                        yield S\n                        return\n                    
elif len(rdcd_a) < len(cntr_a):\n                        a_rf, b_rf = tuple(rdcd_a.elements()), b_type(rdcd_b.elements())\n                        a_perms = permutations(a_rf)\n\n                except TypeError:\n                    # TODO: We could probably get more coverage for this case\n                    # by using `HashableForm`.\n                    pass\n\n                # If they're both ground and we're using basic unification,\n                # then simply check that one is a permutation of the other and\n                # be done.  No need to create and evaluate a bunch of goals in\n                # order to do something that can be done right here.\n                # Naturally, this assumes that the `isground` checks aren't\n                # nearly as costly as all that other stuff.  If the gains\n                # depend on the sizes of `a` and `b`, then we could do\n                # `length_hint` checks first.\n                if inner_eq == eq and isground(a_rf, S) and isground(b_rf, S):\n                    if tuple(b_rf) in a_perms:\n                        yield S\n                        return\n                    else:\n                        # This has to be a definitive check, since we can only\n                        # use the `a_perms` generator once; plus, we don't want\n                        # to iterate over it more than once!\n                        return\n\n            yield from lany(inner_eq(b_rf, a_type(i)) for i in a_perms)(S)\n\n        elif isinstance(b_rf, Sequence):\n\n            b_type = type(b_rf)\n            b_perms = permutations(b_rf)\n\n            if no_ident:\n                next(b_perms)\n\n            yield from lany(inner_eq(a_rf, b_type(i)) for i in b_perms)(S)\n\n        else:\n\n            # None of the arguments are proper sequences, so state that one\n            # should be and apply `permuteo` to that.\n\n            a_itero_g = itero(\n                a_rf, nullo_refs=(b_rf,), 
default_ConsNull=default_ConsNull\n            )\n\n            for S_new in a_itero_g(S):\n                a_new = reify(a_rf, S_new)\n                a_type = type(a_new)\n                a_perms = permutations(a_new)\n\n                if no_ident:\n                    next(a_perms)\n\n                yield from lany(inner_eq(b_rf, a_type(i)) for i in a_perms)(S_new)\n\n    return permuteo_goal\n\n\n# For backward compatibility\npermuteq = permuteo\n"
  },
  {
    "path": "kanren/graph.py",
    "content": "from functools import partial\n\nfrom etuples import etuple\nfrom unification import isvar, reify, var\n\nfrom .core import Zzz, conde, eq, fail, ground_order, lall, succeed\nfrom .goals import conso, nullo\nfrom .term import applyo\n\n\ndef mapo(relation, a, b, null_type=list, null_res=True, first=True):\n    \"\"\"Apply a relation to corresponding elements in two sequences and succeed if the relation succeeds for all pairs.\"\"\"  # noqa: E501\n\n    b_car, b_cdr = var(), var()\n    a_car, a_cdr = var(), var()\n\n    return conde(\n        [nullo(a, b, default_ConsNull=null_type) if (not first or null_res) else fail],\n        [\n            conso(a_car, a_cdr, a),\n            conso(b_car, b_cdr, b),\n            Zzz(relation, a_car, b_car),\n            Zzz(mapo, relation, a_cdr, b_cdr, null_type=null_type, first=False),\n        ],\n    )\n\n\ndef map_anyo(\n    relation, a, b, null_type=list, null_res=False, first=True, any_succeed=False\n):\n    \"\"\"Apply a relation to corresponding elements in two sequences and succeed if at least one pair succeeds.\n\n    Parameters\n    ----------\n    null_type: optional\n       An object that's a valid cdr for the collection type desired.  If\n       `False` (i.e. 
the default value), the cdr will be inferred from the\n       inputs, or defaults to an empty list.\n    \"\"\"  # noqa: E501\n\n    b_car, b_cdr = var(), var()\n    a_car, a_cdr = var(), var()\n\n    return conde(\n        [\n            nullo(a, b, default_ConsNull=null_type)\n            if (any_succeed or (first and null_res))\n            else fail\n        ],\n        [\n            conso(a_car, a_cdr, a),\n            conso(b_car, b_cdr, b),\n            conde(\n                [\n                    Zzz(relation, a_car, b_car),\n                    Zzz(\n                        map_anyo,\n                        relation,\n                        a_cdr,\n                        b_cdr,\n                        null_type=null_type,\n                        any_succeed=True,\n                        first=False,\n                    ),\n                ],\n                [\n                    eq(a_car, b_car),\n                    Zzz(\n                        map_anyo,\n                        relation,\n                        a_cdr,\n                        b_cdr,\n                        null_type=null_type,\n                        any_succeed=any_succeed,\n                        first=False,\n                    ),\n                ],\n            ),\n        ],\n    )\n\n\ndef vararg_success(*args):\n    return succeed\n\n\ndef eq_length(u, v, default_ConsNull=list):\n    \"\"\"Construct a goal stating that two sequences are the same length and type.\"\"\"\n\n    return mapo(vararg_success, u, v, null_type=default_ConsNull)\n\n\ndef reduceo(relation, in_term, out_term, *args, **kwargs):\n    \"\"\"Relate a term and the fixed-point of that term under a given relation.\n\n    This includes the \"identity\" relation.\n    \"\"\"\n\n    def reduceo_goal(s):\n\n        nonlocal in_term, out_term, relation, args, kwargs\n\n        in_term_rf, out_term_rf = reify((in_term, out_term), s)\n\n        # The result of reducing the input graph once\n        
term_rdcd = var()\n\n        # Are we working \"backward\" and (potentially) \"expanding\" a graph\n        # (e.g. when the relation is a reduction rule)?\n        is_expanding = isvar(in_term_rf)\n\n        # One application of the relation assigned to `term_rdcd`\n        single_apply_g = relation(in_term_rf, term_rdcd, *args, **kwargs)\n\n        # Assign/equate (unify, really) the result of a single application to\n        # the \"output\" term.\n        single_res_g = eq(term_rdcd, out_term_rf)\n\n        # Recurse into applications of the relation (well, produce a goal that\n        # will do that)\n        another_apply_g = reduceo(relation, term_rdcd, out_term_rf, *args, **kwargs)\n\n        # We want the fixed-point value to show up in the stream output\n        # *first*, but that requires some checks.\n        if is_expanding:\n            # When an un-reduced term is a logic variable (e.g. we're\n            # \"expanding\"), we can't go depth first.\n            # We need to draw the association between (i.e. unify) the reduced\n            # and expanded terms ASAP, in order to produce finite\n            # expanded graphs first and yield results.\n            #\n            # In other words, there's no fixed-point to produce in this\n            # situation.  
Instead, for example, we have to produce an infinite\n            # stream of terms that have `out_term_rf` as a fixed point.\n            # g = conde([single_res_g, single_apply_g],\n            #           [another_apply_g, single_apply_g])\n            g = lall(conde([single_res_g], [another_apply_g]), single_apply_g)\n        else:\n            # Run the recursion step first, so that we get the fixed-point as\n            # the first result\n            g = lall(single_apply_g, conde([another_apply_g], [single_res_g]))\n\n        yield from g(s)\n\n    return reduceo_goal\n\n\ndef walko(\n    goal,\n    graph_in,\n    graph_out,\n    rator_goal=None,\n    null_type=etuple,\n    map_rel=partial(map_anyo, null_res=True),\n):\n    \"\"\"Apply a binary relation between all nodes in two graphs.\n\n    When `rator_goal` is used, the graphs are treated as term graphs, and the\n    multi-functions `rator`, `rands`, and `apply` are used to walk the graphs.\n    Otherwise, the graphs must be iterable according to `map_anyo`.\n\n    Parameters\n    ----------\n    goal: callable\n        A goal that is applied to all terms in the graph.\n    graph_in: object\n        The graph for which the left-hand side of a binary relation holds.\n    graph_out: object\n        The graph for which the right-hand side of a binary relation holds.\n    rator_goal: callable (default None)\n        A goal that is applied to the rators of a graph.  
When specified,\n        `goal` is only applied to rands and it must succeed along with the\n        rator goal in order to descend into sub-terms.\n    null_type: type\n        The collection type used when it is not fully determined by the graph\n        arguments.\n    map_rel: callable\n        The map relation used to apply `goal` to a sub-graph.\n    \"\"\"\n\n    def walko_goal(s):\n\n        nonlocal goal, rator_goal, graph_in, graph_out, null_type, map_rel\n\n        graph_in_rf, graph_out_rf = reify((graph_in, graph_out), s)\n\n        rator_in, rands_in, rator_out, rands_out = var(), var(), var(), var()\n\n        _walko = partial(\n            walko, goal, rator_goal=rator_goal, null_type=null_type, map_rel=map_rel\n        )\n\n        g = conde(\n            # TODO: Use `Zzz`, if needed.\n            [\n                goal(graph_in_rf, graph_out_rf),\n            ],\n            [\n                lall(\n                    applyo(rator_in, rands_in, graph_in_rf),\n                    applyo(rator_out, rands_out, graph_out_rf),\n                    rator_goal(rator_in, rator_out),\n                    map_rel(_walko, rands_in, rands_out, null_type=null_type),\n                )\n                if rator_goal is not None\n                else map_rel(_walko, graph_in_rf, graph_out_rf, null_type=null_type),\n            ],\n        )\n\n        yield from g(s)\n\n    return walko_goal\n\n\ndef term_walko(\n    rator_goal,\n    rands_goal,\n    a,\n    b,\n    null_type=etuple,\n    no_ident=False,\n    format_step=None,\n    **kwargs\n):\n    \"\"\"Construct a goal for walking a term graph.\n\n    This implementation is somewhat specific to the needs of `eq_comm` and\n    `eq_assoc`, but it could be transferred to `kanren.graph`.\n\n    XXX: Make sure `rator_goal` will succeed for unground logic variables;\n    otherwise, this will diverge.\n    XXX: `rands_goal` should not be contain `eq`, i.e. 
`rands_goal(x, x)`\n    should always fail!\n    \"\"\"\n\n    def single_step(s, t):\n        u, v = var(), var()\n        u_rator, u_rands = var(), var()\n        v_rands = var()\n\n        return lall(\n            ground_order((s, t), (u, v)),\n            applyo(u_rator, u_rands, u),\n            applyo(u_rator, v_rands, v),\n            rator_goal(u_rator),\n            # These make sure that there are at least two rands, which\n            # makes sense for commutativity and associativity, at least.\n            conso(var(), var(), u_rands),\n            conso(var(), var(), v_rands),\n            Zzz(rands_goal, u_rands, v_rands, u_rator, **kwargs),\n        )\n\n    def term_walko_step(s, t):\n        nonlocal rator_goal, rands_goal, null_type\n        u, v = var(), var()\n        z, w = var(), var()\n\n        return lall(\n            ground_order((s, t), (u, v)),\n            format_step(u, w) if format_step is not None else eq(u, w),\n            conde(\n                [\n                    # Apply, then walk or return\n                    single_step(w, v),\n                ],\n                [\n                    # Walk, then apply or return\n                    map_anyo(term_walko_step, w, z, null_type=null_type),\n                    conde([eq(z, v)], [single_step(z, v)]),\n                ],\n            ),\n        )\n\n    return lall(\n        term_walko_step(a, b)\n        if no_ident\n        else conde([term_walko_step(a, b)], [eq(a, b)]),\n    )\n"
  },
  {
    "path": "kanren/py.typed",
    "content": ""
  },
  {
    "path": "kanren/term.py",
    "content": "from collections.abc import Mapping, Sequence\n\nfrom cons.core import ConsError, cons\nfrom etuples import apply as term\nfrom etuples import rands as arguments\nfrom etuples import rator as operator\nfrom unification.core import _reify, _unify, construction_sentinel, reify\nfrom unification.variable import isvar\n\nfrom .core import eq, lall\nfrom .goals import conso\n\n\ndef applyo(o_rator, o_rands, obj):\n    \"\"\"Construct a goal that relates an object to the application of its (ope)rator to its (ope)rands.\n\n    In other words, this is the relation `op(*args) == obj`.  It uses the\n    `rator`, `rands`, and `apply` dispatch functions from `etuples`, so\n    implement/override those to get the desired behavior.\n\n    \"\"\"  # noqa: E501\n\n    def applyo_goal(S):\n        nonlocal o_rator, o_rands, obj\n\n        o_rator_rf, o_rands_rf, obj_rf = reify((o_rator, o_rands, obj), S)\n\n        if not isvar(obj_rf):\n\n            # We should be able to use this goal with *any* arguments, so\n            # fail when the ground operations fail/err.\n            try:\n                obj_rator, obj_rands = operator(obj_rf), arguments(obj_rf)\n            except (ConsError, NotImplementedError):\n                return\n\n            # The object's rator + rands should be the same as the goal's\n            yield from lall(eq(o_rator_rf, obj_rator), eq(o_rands_rf, obj_rands))(S)\n\n        elif isvar(o_rands_rf) or isvar(o_rator_rf):\n            # The object and at least one of the rand, rators is a logic\n            # variable, so let's just assert a `cons` relationship between\n            # them\n            yield from conso(o_rator_rf, o_rands_rf, obj_rf)(S)\n        else:\n            # The object is a logic variable, but the rator and rands aren't.\n            # We assert that the object is the application of the rand and\n            # rators.\n            try:\n                obj_applied = term(o_rator_rf, o_rands_rf)\n            
except (ConsError, NotImplementedError):\n                return\n            yield from eq(obj_rf, obj_applied)(S)\n\n    return applyo_goal\n\n\n@term.register(object, Sequence)\ndef term_Sequence(rator, rands):\n    # Overwrite the default `apply` dispatch function and make it preserve\n    # types\n    res = cons(rator, rands)\n    return res\n\n\ndef unifiable_with_term(cls):\n    _reify.add((cls, Mapping), reify_term)\n    _unify.add((cls, cls, Mapping), unify_term)\n    return cls\n\n\ndef reify_term(obj, s):\n    op, args = operator(obj), arguments(obj)\n    op = yield _reify(op, s)\n    args = yield _reify(args, s)\n    yield construction_sentinel\n    yield term(op, args)\n\n\ndef unify_term(u, v, s):\n    u_op, u_args = operator(u), arguments(u)\n    v_op, v_args = operator(v), arguments(v)\n    s = yield _unify(u_op, v_op, s)\n    if s is not False:\n        s = yield _unify(u_args, v_args, s)\n    yield s\n"
  },
  {
    "path": "kanren/util.py",
    "content": "from collections import namedtuple\nfrom collections.abc import Hashable, Iterable, Mapping, MutableSet, Set\nfrom itertools import chain\n\n\nHashableForm = namedtuple(\"HashableForm\", [\"type\", \"data\"])\n\n\nclass FlexibleSet(MutableSet):\n    \"\"\"A set that uses a list (and costly identity check) for unhashable items.\"\"\"\n\n    __slots__ = (\"set\", \"list\")\n\n    def __init__(self, iterable=None):\n\n        self.set = set()\n        self.list = []\n\n        if iterable is not None:\n            for i in iterable:\n                self.add(i)\n\n    def add(self, item):\n        try:\n            self.set.add(item)\n        except TypeError:\n            # TODO: Could try `make_hashable`.\n            # TODO: Use `bisect` for unhashable but orderable elements\n            if item not in self.list:\n                self.list.append(item)\n\n    def discard(self, item):\n        try:\n            self.remove(item)\n        except KeyError:\n            pass\n\n    def clear(self):\n        self.set.clear()\n        self.list.clear()\n\n    def pop(self):\n        try:\n            return self.set.pop()\n        except (TypeError, KeyError):\n            try:\n                return self.list.pop(-1)\n            except IndexError:\n                raise KeyError()\n\n    def remove(self, item):\n        try:\n            self.set.remove(item)\n        except (TypeError, KeyError):\n            try:\n                self.list.remove(item)\n            except ValueError:\n                raise KeyError()\n\n    def copy(self):\n        res = type(self)()\n        res.set = self.set.copy()\n        res.list = self.list.copy()\n        return res\n\n    def __le__(self, other):\n        raise NotImplementedError()\n\n    def __ge__(self, other):\n        raise NotImplementedError()\n\n    def __iter__(self):\n        return chain(self.set, self.list)\n\n    def __contains__(self, value):\n        try:\n            return value in self.set 
or value in self.list\n        except TypeError:\n            return value in self.list\n\n    def __len__(self):\n        return len(self.set) + len(self.list)\n\n    def __eq__(self, other):\n        if type(self) == type(other):\n            return self.set == other.set and self.list == other.list\n        elif isinstance(other, Set):\n            return len(self.list) == 0 and other.issuperset(self.set)\n\n        return NotImplemented\n\n    def __repr__(self):\n        return f\"FlexibleSet([{', '.join(str(s) for s in self)}])\"\n\n\ndef hashable(x):\n    try:\n        hash(x)\n        return True\n    except TypeError:\n        return False\n\n\ndef dicthash(d):\n    return hash(frozenset(d.items()))\n\n\ndef make_hashable(x):\n    # TODO: Better as a dispatch function?\n    if hashable(x):\n        return x\n    if isinstance(x, slice):\n        return HashableForm(type(x), (x.start, x.stop, x.step))\n    if isinstance(x, Mapping):\n        return HashableForm(type(x), frozenset(tuple(multihash(i) for i in x.items())))\n    if isinstance(x, Iterable):\n        return HashableForm(type(x), tuple(multihash(i) for i in x))\n    raise TypeError(f\"Hashing not covered for {x}\")\n\n\ndef multihash(x):\n    return hash(make_hashable(x))\n\n\ndef unique(seq, key=lambda x: x):\n    seen = set()\n    for item in seq:\n        try:\n            k = key(item)\n        except TypeError:\n            # Just yield it and hope for the best, since we can't efficiently\n            # check if we've seen it before.\n            yield item\n            continue\n        if not isinstance(k, Hashable):\n            # Just yield it and hope for the best, since we can't efficiently\n            # check if we've seen it before.\n            yield item\n        elif k not in seen:\n            seen.add(key(item))\n            yield item\n\n\ndef intersection(*seqs):\n    return (item for item in seqs[0] if all(item in seq for seq in seqs[1:]))\n\n\ndef groupsizes(total, len):\n    
\"\"\"Construct groups of length len that add up to total.\n\n    >>> from kanren.util import groupsizes\n    >>> tuple(groupsizes(4, 2))\n    ((1, 3), (2, 2), (3, 1))\n    \"\"\"\n    if len == 1:\n        yield (total,)\n    else:\n        for i in range(1, total - len + 1 + 1):\n            for perm in groupsizes(total - i, len - 1):\n                yield (i,) + perm\n\n\ndef pprint(g):  # pragma: no cover\n    \"\"\"Pretty print a tree of goals.\"\"\"\n    if callable(g) and hasattr(g, \"__name__\"):\n        return g.__name__\n    if isinstance(g, type):\n        return g.__name__\n    if isinstance(g, tuple):\n        return \"(\" + \", \".join(map(pprint, g)) + \")\"\n    return str(g)\n\n\ndef index(tup, ind):\n    \"\"\"Fancy indexing with tuples.\"\"\"\n    return tuple(tup[i] for i in ind)\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"setuptools>=77.0.0\", \"setuptools-scm[toml]\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"miniKanren\"\ndynamic = ['version']\nrequires-python = \">=3.9\"\nauthors = [{ name = \"Brandon T. Willard\", email = \"brandonwillard+kanren@gmail.com\" }]\ndescription = \"Relational programming in Python\"\nreadme = \"README.md\"\nlicense = \"BSD-3-Clause\"\nlicense-files = [\"LICENSE.txt\"]\nclassifiers = [\n    \"Development Status :: 5 - Production/Stable\",\n    \"Intended Audience :: Science/Research\",\n    \"Intended Audience :: Developers\",\n    \"Operating System :: OS Independent\",\n    \"Programming Language :: Python\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n    \"Programming Language :: Python :: Implementation :: CPython\",\n    \"Programming Language :: Python :: Implementation :: PyPy\",\n]\n\ndependencies = [\n    \"cons >= 0.4.0\",\n    \"etuples >= 0.3.1\",\n    \"logical-unification >= 0.4.1\",\n    \"toolz\",\n]\n\n[project.urls]\nrepository = \"http://github.com/pythological/kanren\"\n\n[dependency-groups]\ntest = [\n    \"pytest\",\n    \"sympy\",\n]\n\n[tool.setuptools]\ninclude-package-data = true\n\n[tool.setuptools.packages.find]\ninclude = [\"kanren*\"]\nexclude = [\"doc*\", \"examples*\", \"tests*\"]\n\n[tool.setuptools.package-data]\nkanren = [\"py.typed\"]\n\n[tool.setuptools_scm]\nversion_scheme = \"guess-next-dev\"\nlocal_scheme = \"dirty-tag\"\n"
  },
  {
    "path": "pytest.ini",
    "content": "# content of pytest.ini\n[pytest]\naddopts = --doctest-modules\nnorecursedirs = examples\ntestpaths = tests\ndoctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL"
  },
  {
    "path": "release-notes",
    "content": "New in version 0.2\n\n*   Python 3 support\n*   Dictionary unification\n*   Use multiple dispatch to extend unify, reify, isvar\n*   Add convenience class decorator `unifiable` to facilitate trivial \n    unification of user classes\n*   Add term operations term, arguments, operator, also multiply dispatched\n*   Depend on the toolz library\n*   Performance degredation as a result of multiple dispatch\n*   Arithmetic goals\n*   Improved set matching performance\n"
  },
  {
    "path": "requirements.txt",
    "content": "-e ./\ncoveralls\npydocstyle>=3.0.0\npytest>=5.0.0\npytest-cov>=2.6.1\npytest-html>=1.20.0\npylint>=2.3.1\nblack>=19.3b0; platform.python_implementation!='PyPy'\ndiff-cover\nsympy\nversioneer\ncoverage>=5.1\npre-commit\n"
  },
  {
    "path": "setup.cfg",
    "content": "[pydocstyle]\n# Ignore errors for missing docstrings.\n# Ignore D202 (No blank lines allowed after function docstring)\n# due to bug in black: https://github.com/ambv/black/issues/355\nadd-ignore = D100,D101,D102,D103,D104,D105,D106,D107,D202\nconvention = numpy\n\n[tool:pytest]\npython_files=test*.py\ntestpaths=tests\n\n[coverage:run]\nrelative_files = True\nomit =\n    kanren/_version.py\n    tests/*\nbranch = True\n\n[coverage:report]\nexclude_lines =\n    pragma: no cover\n    def __repr__\n    raise AssertionError\n    raise TypeError\n    return NotImplemented\n    raise NotImplementedError\n    if __name__ == .__main__.:\n    assert False\nshow_missing = 1\n\n[isort]\nprofile = black\nlines_after_imports = 2\nlines_between_sections = 1\nhonor_noqa = True\nskip_gitignore = True\n\n[flake8]\nmax-line-length = 88\nextend-ignore = E203, W503\nper-file-ignores =\n    **/__init__.py:F401,E402,F403\n\n[pylint]\nmax-line-length = 88\n\n[pylint.messages_control]\ndisable = C0330, C0326\n\n[mypy]\nignore_missing_imports = True\nno_implicit_optional = True\ncheck_untyped_defs = False\nstrict_equality = True\nwarn_redundant_casts = True\nwarn_unused_configs = True\nwarn_unused_ignores = True\nwarn_return_any = True\nwarn_no_return = False\nwarn_unreachable = True\nshow_error_codes = True\nallow_redefinition = False\nfiles = kanren,tests\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_assoccomm.py",
    "content": "from collections.abc import Sequence\nfrom copy import copy\n\nimport pytest\nfrom cons import cons\nfrom etuples.core import etuple\nfrom unification import isvar, reify, unify, var\n\nfrom kanren.assoccomm import (\n    assoc_args,\n    assoc_flatten,\n    associative,\n    commutative,\n    eq_assoc,\n    eq_assoc_args,\n    eq_assoccomm,\n    eq_comm,\n    flatten_assoc_args,\n)\nfrom kanren.core import run\nfrom kanren.facts import fact\nfrom kanren.term import arguments, operator, term\n\n\n@pytest.fixture(autouse=True)\ndef clear_assoccomm():\n    old_commutative_index = copy(commutative.index)\n    old_commutative_facts = copy(commutative.facts)\n    old_associative_index = copy(associative.index)\n    old_associative_facts = copy(associative.facts)\n    try:\n        yield\n    finally:\n        commutative.index = old_commutative_index\n        commutative.facts = old_commutative_facts\n        associative.index = old_associative_index\n        associative.facts = old_associative_facts\n\n\nclass Node(object):\n    def __init__(self, op, args):\n        self.op = op\n        self.args = args\n\n    def __eq__(self, other):\n        return (\n            type(self) == type(other)\n            and self.op == other.op\n            and self.args == other.args\n        )\n\n    def __hash__(self):\n        return hash((type(self), self.op, self.args))\n\n    def __str__(self):\n        return \"%s(%s)\" % (self.op.name, \", \".join(map(str, self.args)))\n\n    __repr__ = __str__\n\n\nclass Operator(object):\n    def __init__(self, name):\n        self.name = name\n\n\nAdd = Operator(\"add\")\nMul = Operator(\"mul\")\n\n\ndef add(*args):\n    return Node(Add, args)\n\n\ndef mul(*args):\n    return Node(Mul, args)\n\n\n@term.register(Operator, Sequence)\ndef term_Operator(op, args):\n    return Node(op, args)\n\n\n@arguments.register(Node)\ndef arguments_Node(n):\n    return n.args\n\n\n@operator.register(Node)\ndef operator_Node(n):\n    return 
n.op\n\n\ndef results(g, s=None):\n    if s is None:\n        s = dict()\n    return tuple(g(s))\n\n\ndef test_eq_comm():\n    x, y, z = var(), var(), var()\n\n    comm_op = \"comm_op\"\n\n    fact(commutative, comm_op)\n\n    assert run(0, True, eq_comm(1, 1)) == (True,)\n    assert run(0, True, eq_comm((comm_op, 1, 2, 3), (comm_op, 1, 2, 3))) == (True,)\n\n    assert run(0, True, eq_comm((comm_op, 3, 2, 1), (comm_op, 1, 2, 3))) == (True,)\n    assert run(0, y, eq_comm((comm_op, 3, y, 1), (comm_op, 1, 2, 3))) == (2,)\n    assert run(0, (x, y), eq_comm((comm_op, x, y, 1), (comm_op, 1, 2, 3))) == (\n        (2, 3),\n        (3, 2),\n    )\n    assert run(0, (x, y), eq_comm((comm_op, 2, 3, 1), (comm_op, 1, x, y))) == (\n        (2, 3),\n        (3, 2),\n    )\n\n    assert not run(\n        0, True, eq_comm((\"op\", 3, 2, 1), (\"op\", 1, 2, 3))\n    )  # not commutative\n    assert not run(0, True, eq_comm((3, comm_op, 2, 1), (comm_op, 1, 2, 3)))\n    assert not run(0, True, eq_comm((comm_op, 1, 2, 1), (comm_op, 1, 2, 3)))\n    assert not run(0, True, eq_comm((\"op\", 1, 2, 3), (comm_op, 1, 2, 3)))\n\n    # Test for variable args\n    res = run(4, (x, y), eq_comm(x, y))\n    exp_res_form = (\n        (etuple(comm_op, x, y), etuple(comm_op, y, x)),\n        (x, y),\n        (etuple(etuple(comm_op, x, y)), etuple(etuple(comm_op, y, x))),\n        (etuple(comm_op, x, y, z), etuple(comm_op, x, z, y)),\n    )\n\n    for a, b in zip(res, exp_res_form):\n        s = unify(a, b)\n        assert s is not False\n        assert all(isvar(i) for i in reify((x, y, z), s))\n\n    # Make sure it can unify single elements\n    assert (3,) == run(0, x, eq_comm((comm_op, 1, 2, 3), (comm_op, 2, x, 1)))\n\n    # `eq_comm` should propagate through\n    assert (3,) == run(\n        0, x, eq_comm((\"div\", 1, (comm_op, 1, 2, 3)), (\"div\", 1, (comm_op, 2, x, 1)))\n    )\n    # Now it should not\n    assert () == run(\n        0, x, eq_comm((\"div\", 1, (\"div\", 1, 2, 3)), (\"div\", 1, 
(\"div\", 2, x, 1)))\n    )\n\n    expected_res = {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}\n    assert expected_res == set(\n        run(0, (x, y, z), eq_comm((comm_op, 1, 2, 3), (comm_op, x, y, z)))\n    )\n    assert expected_res == set(\n        run(0, (x, y, z), eq_comm((comm_op, x, y, z), (comm_op, 1, 2, 3)))\n    )\n    assert expected_res == set(\n        run(\n            0,\n            (x, y, z),\n            eq_comm((\"div\", 1, (comm_op, 1, 2, 3)), (\"div\", 1, (comm_op, x, y, z))),\n        )\n    )\n\n    e1 = (comm_op, (comm_op, 1, x), y)\n    e2 = (comm_op, 2, (comm_op, 3, 1))\n    assert run(0, (x, y), eq_comm(e1, e2)) == ((3, 2),)\n\n    e1 = ((comm_op, 3, 1),)\n    e2 = ((comm_op, 1, x),)\n\n    assert run(0, x, eq_comm(e1, e2)) == (3,)\n\n    e1 = (2, (comm_op, 3, 1))\n    e2 = (y, (comm_op, 1, x))\n\n    assert run(0, (x, y), eq_comm(e1, e2)) == ((3, 2),)\n\n    e1 = (comm_op, (comm_op, 1, x), y)\n    e2 = (comm_op, 2, (comm_op, 3, 1))\n\n    assert run(0, (x, y), eq_comm(e1, e2)) == ((3, 2),)\n\n\n@pytest.mark.xfail(reason=\"`applyo`/`buildo` needs to be a constraint.\", strict=True)\ndef test_eq_comm_object():\n    x = var(\"x\")\n\n    fact(commutative, Add)\n    fact(associative, Add)\n\n    assert run(0, x, eq_comm(add(1, 2, 3), add(3, 1, x))) == (2,)\n    assert set(run(0, x, eq_comm(add(1, 2), x))) == set((add(1, 2), add(2, 1)))\n    assert set(run(0, x, eq_assoccomm(add(1, 2, 3), add(1, x)))) == set(\n        (add(2, 3), add(3, 2))\n    )\n\n\ndef test_flatten_assoc_args():\n    op = \"add\"\n\n    def op_pred(x):\n        return x == op\n\n    assert list(flatten_assoc_args(op_pred, [op, 1, 2, 3, 4])) == [op, 1, 2, 3, 4]\n    assert list(flatten_assoc_args(op_pred, [op, 1, 2, [op]])) == [op, 1, 2, [op]]\n    assert list(flatten_assoc_args(op_pred, [[op, 1, 2, [op]]])) == [1, 2, [op]]\n\n    res = list(\n        flatten_assoc_args(\n            op_pred, [[1, 2, op], 3, [op, 4, [op, [op]]], [op, 5], 6, op, 7]\n  
      )\n    )\n    exp_res = [[1, 2, op], 3, 4, [op], 5, 6, op, 7]\n    assert res == exp_res\n\n\ndef test_assoc_args():\n    op = \"add\"\n\n    def op_pred(x):\n        return x == op\n\n    assert tuple(assoc_args(op, (1, 2, 3), 2)) == (\n        ((op, 1, 2), 3),\n        (1, (op, 2, 3)),\n    )\n    assert tuple(assoc_args(op, [1, 2, 3], 2)) == (\n        [[op, 1, 2], 3],\n        [1, [op, 2, 3]],\n    )\n    assert tuple(assoc_args(op, (1, 2, 3), 1)) == (\n        ((op, 1), 2, 3),\n        (1, (op, 2), 3),\n        (1, 2, (op, 3)),\n    )\n    assert tuple(assoc_args(op, (1, 2, 3), 3)) == ((1, 2, 3),)\n\n    f_rands = flatten_assoc_args(op_pred, (1, (op, 2, 3)))\n    assert tuple(assoc_args(op, f_rands, 2, ctor=tuple)) == (\n        ((op, 1, 2), 3),\n        (1, (op, 2, 3)),\n    )\n\n\ndef test_eq_assoc_args():\n\n    assoc_op = \"assoc_op\"\n\n    fact(associative, assoc_op)\n\n    assert not run(0, True, eq_assoc_args(assoc_op, (1,), [1], n=None))\n    assert run(0, True, eq_assoc_args(assoc_op, (1,), (1,), n=None)) == (True,)\n    assert run(0, True, eq_assoc_args(assoc_op, (1, 1), (1, 1))) == (True,)\n    assert run(0, True, eq_assoc_args(assoc_op, (1, 2, 3), (1, (assoc_op, 2, 3)))) == (\n        True,\n    )\n    assert run(0, True, eq_assoc_args(assoc_op, (1, (assoc_op, 2, 3)), (1, 2, 3))) == (\n        True,\n    )\n    assert run(\n        0, True, eq_assoc_args(assoc_op, (1, (assoc_op, 2, 3), 4), (1, 2, 3, 4))\n    ) == (True,)\n    assert not run(\n        0, True, eq_assoc_args(assoc_op, (1, 2, 3), (1, (assoc_op, 2, 3), 4))\n    )\n\n    x, y = var(), var()\n\n    assert run(0, True, eq_assoc_args(assoc_op, (x,), (x,), n=None)) == (True,)\n    assert run(0, x, eq_assoc_args(assoc_op, x, (y,), n=None)) == ((y,),)\n    assert run(0, x, eq_assoc_args(assoc_op, (y,), x, n=None)) == ((y,),)\n\n    assert run(0, x, eq_assoc_args(assoc_op, (1, x, 4), (1, 2, 3, 4))) == (\n        (assoc_op, 2, 3),\n    )\n    assert run(0, x, eq_assoc_args(assoc_op, (1, 
2, 3, 4), (1, x, 4))) == (\n        (assoc_op, 2, 3),\n    )\n    assert run(0, x, eq_assoc_args(assoc_op, [1, x, 4], [1, 2, 3, 4])) == (\n        [assoc_op, 2, 3],\n    )\n    assert run(0, True, eq_assoc_args(assoc_op, (1, 1), (\"other_op\", 1, 1))) == ()\n\n    assert run(0, x, eq_assoc_args(assoc_op, (1, 2, 3), x, n=2)) == (\n        ((assoc_op, 1, 2), 3),\n        (1, (assoc_op, 2, 3)),\n    )\n    assert run(0, x, eq_assoc_args(assoc_op, x, (1, 2, 3), n=2)) == (\n        ((assoc_op, 1, 2), 3),\n        (1, (assoc_op, 2, 3)),\n    )\n\n    assert run(0, x, eq_assoc_args(assoc_op, (1, 2, 3), x)) == (\n        ((assoc_op, 1, 2), 3),\n        (1, (assoc_op, 2, 3)),\n        (1, 2, 3),\n    )\n\n    assert () not in run(0, x, eq_assoc_args(assoc_op, (), x, no_ident=True))\n    assert (1,) not in run(0, x, eq_assoc_args(assoc_op, (1,), x, no_ident=True))\n    assert (1, 2, 3) not in run(\n        0, x, eq_assoc_args(assoc_op, (1, 2, 3), x, no_ident=True)\n    )\n\n    assert (\n        run(\n            0,\n            True,\n            eq_assoc_args(\n                assoc_op,\n                (1, (assoc_op, 2, 3)),\n                (1, (assoc_op, 2, 3)),\n                no_ident=True,\n            ),\n        )\n        == ()\n    )\n\n    assert run(\n        0,\n        True,\n        eq_assoc_args(\n            assoc_op,\n            (1, (assoc_op, 2, 3)),\n            ((assoc_op, 1, 2), 3),\n            no_ident=True,\n        ),\n    ) == (True,)\n\n\ndef test_eq_assoc():\n\n    assoc_op = \"assoc_op\"\n\n    fact(associative, assoc_op)\n\n    assert run(0, True, eq_assoc(1, 1)) == (True,)\n    assert run(0, True, eq_assoc((assoc_op, 1, 2, 3), (assoc_op, 1, 2, 3))) == (True,)\n    assert not run(0, True, eq_assoc((assoc_op, 3, 2, 1), (assoc_op, 1, 2, 3)))\n    assert run(\n        0, True, eq_assoc((assoc_op, (assoc_op, 1, 2), 3), (assoc_op, 1, 2, 3))\n    ) == (True,)\n    assert run(\n        0, True, eq_assoc((assoc_op, 1, 2, 3), (assoc_op, (assoc_op, 
1, 2), 3))\n    ) == (True,)\n    o = \"op\"\n    assert not run(0, True, eq_assoc((o, 1, 2, 3), (o, (o, 1, 2), 3)))\n\n    x = var()\n    res = run(0, x, eq_assoc((assoc_op, 1, 2, 3), x, n=2))\n    assert res == (\n        (assoc_op, (assoc_op, 1, 2), 3),\n        (assoc_op, 1, 2, 3),\n        (assoc_op, 1, (assoc_op, 2, 3)),\n    )\n\n    res = run(0, x, eq_assoc(x, (assoc_op, 1, 2, 3), n=2))\n    assert res == (\n        (assoc_op, (assoc_op, 1, 2), 3),\n        (assoc_op, 1, 2, 3),\n        (assoc_op, 1, (assoc_op, 2, 3)),\n    )\n\n    y, z = var(), var()\n\n    # Check results when both arguments are variables\n    res = run(3, (x, y), eq_assoc(x, y))\n    exp_res_form = (\n        (etuple(assoc_op, x, y, z), etuple(assoc_op, etuple(assoc_op, x, y), z)),\n        (x, y),\n        (\n            etuple(etuple(assoc_op, x, y, z)),\n            etuple(etuple(assoc_op, etuple(assoc_op, x, y), z)),\n        ),\n    )\n\n    for a, b in zip(res, exp_res_form):\n        s = unify(a, b)\n        assert s is not False, (a, b)\n        assert all(isvar(i) for i in reify((x, y, z), s))\n\n    # Make sure it works with `cons`\n    res = run(0, (x, y), eq_assoc(cons(x, y), (assoc_op, 1, 2, 3)))\n    assert res == (\n        (assoc_op, ((assoc_op, 1, 2), 3)),\n        (assoc_op, (1, 2, 3)),\n        (assoc_op, (1, (assoc_op, 2, 3))),\n    )\n\n    res = run(1, (x, y), eq_assoc(cons(x, y), (x, z, 2, 3)))\n    assert res == ((assoc_op, ((assoc_op, z, 2), 3)),)\n\n    # Don't use a predicate that can never succeed, e.g.\n    # associative_2 = Relation(\"associative_2\")\n    # run(1, (x, y), eq_assoc(cons(x, y), (x, z), op_predicate=associative_2))\n\n    # Nested expressions should work now\n    expr1 = (assoc_op, 1, 2, (assoc_op, x, 5, 6))\n    expr2 = (assoc_op, (assoc_op, 1, 2), 3, 4, 5, 6)\n    assert run(0, x, eq_assoc(expr1, expr2, n=2)) == ((assoc_op, 3, 4),)\n\n\ndef test_assoc_flatten():\n\n    add = \"add\"\n    mul = \"mul\"\n\n    fact(commutative, add)\n    
fact(associative, add)\n    fact(commutative, mul)\n    fact(associative, mul)\n\n    assert run(\n        0,\n        True,\n        assoc_flatten((mul, 1, (add, 2, 3), (mul, 4, 5)), (mul, 1, (add, 2, 3), 4, 5)),\n    ) == (True,)\n\n    x = var()\n    assert run(\n        0,\n        x,\n        assoc_flatten((mul, 1, (add, 2, 3), (mul, 4, 5)), x),\n    ) == ((mul, 1, (add, 2, 3), 4, 5),)\n\n    assert run(\n        0,\n        True,\n        assoc_flatten(\n            (\"op\", 1, (add, 2, 3), (mul, 4, 5)), (\"op\", 1, (add, 2, 3), (mul, 4, 5))\n        ),\n    ) == (True,)\n\n    assert run(0, x, assoc_flatten((\"op\", 1, (add, 2, 3), (mul, 4, 5)), x)) == (\n        (\"op\", 1, (add, 2, 3), (mul, 4, 5)),\n    )\n\n\ndef test_eq_assoccomm():\n    x, y = var(), var()\n\n    ac = \"commassoc_op\"\n\n    fact(commutative, ac)\n    fact(associative, ac)\n\n    assert run(0, True, eq_assoccomm(1, 1)) == (True,)\n    assert run(0, True, eq_assoccomm((1,), (1,))) == (True,)\n    assert run(0, True, eq_assoccomm(x, (1,))) == (True,)\n    assert run(0, True, eq_assoccomm((1,), x)) == (True,)\n\n    # Assoc only\n    assert run(0, True, eq_assoccomm((ac, 1, (ac, 2, 3)), (ac, (ac, 1, 2), 3))) == (\n        True,\n    )\n    # Commute only\n    assert run(0, True, eq_assoccomm((ac, 1, (ac, 2, 3)), (ac, (ac, 3, 2), 1))) == (\n        True,\n    )\n    # Both\n    assert run(0, True, eq_assoccomm((ac, 1, (ac, 3, 2)), (ac, (ac, 1, 2), 3))) == (\n        True,\n    )\n\n    exp_res = set(\n        (\n            (ac, 1, 3, 2),\n            (ac, 1, 2, 3),\n            (ac, 2, 1, 3),\n            (ac, 2, 3, 1),\n            (ac, 3, 1, 2),\n            (ac, 3, 2, 1),\n            (ac, 1, (ac, 2, 3)),\n            (ac, 1, (ac, 3, 2)),\n            (ac, 2, (ac, 1, 3)),\n            (ac, 2, (ac, 3, 1)),\n            (ac, 3, (ac, 1, 2)),\n            (ac, 3, (ac, 2, 1)),\n            (ac, (ac, 2, 3), 1),\n            (ac, (ac, 3, 2), 1),\n            (ac, (ac, 1, 3), 2),\n            
(ac, (ac, 3, 1), 2),\n            (ac, (ac, 1, 2), 3),\n            (ac, (ac, 2, 1), 3),\n        )\n    )\n    assert set(run(0, x, eq_assoccomm((ac, 1, (ac, 2, 3)), x))) == exp_res\n    assert set(run(0, x, eq_assoccomm((ac, 1, 3, 2), x))) == exp_res\n    assert set(run(0, x, eq_assoccomm((ac, 2, (ac, 3, 1)), x))) == exp_res\n    # LHS variations\n    assert set(run(0, x, eq_assoccomm(x, (ac, 1, (ac, 2, 3))))) == exp_res\n\n    assert run(0, (x, y), eq_assoccomm((ac, (ac, 1, x), y), (ac, 2, (ac, 3, 1)))) == (\n        (2, 3),\n        (3, 2),\n    )\n\n    assert run(0, True, eq_assoccomm((ac, (ac, 1, 2), 3), (ac, 1, 2, 3))) == (True,)\n    assert run(0, True, eq_assoccomm((ac, 3, (ac, 1, 2)), (ac, 1, 2, 3))) == (True,)\n    assert run(0, True, eq_assoccomm((ac, 1, 1), (\"other_op\", 1, 1))) == ()\n\n    assert run(0, x, eq_assoccomm((ac, 3, (ac, 1, 2)), (ac, 1, x, 3))) == (2,)\n\n    # Both arguments unground\n    op_lv = var()\n    z = var()\n    res = run(4, (x, y), eq_assoccomm(x, y))\n    exp_res_form = (\n        (etuple(op_lv, x, y), etuple(op_lv, y, x)),\n        (y, y),\n        (\n            etuple(etuple(op_lv, x, y)),\n            etuple(etuple(op_lv, y, x)),\n        ),\n        (\n            etuple(op_lv, x, y, z),\n            etuple(op_lv, etuple(op_lv, x, y), z),\n        ),\n    )\n\n    for a, b in zip(res, exp_res_form):\n        s = unify(a, b)\n        assert (\n            op_lv not in s\n            or (s[op_lv],) in associative.facts\n            or (s[op_lv],) in commutative.facts\n        )\n        assert s is not False, (a, b)\n        assert all(isvar(i) for i in reify((x, y, z), s))\n\n\ndef test_assoccomm_algebra():\n\n    add = \"add\"\n    mul = \"mul\"\n\n    fact(commutative, add)\n    fact(associative, add)\n    fact(commutative, mul)\n    fact(associative, mul)\n\n    x, y = var(), var()\n\n    pattern = (mul, (add, 1, x), y)  # (1 + x) * y\n    expr = (mul, 2, (add, 3, 1))  # 2 * (3 + 1)\n\n    assert run(0, (x, y), 
eq_assoccomm(pattern, expr)) == ((3, 2),)\n\n\ndef test_assoccomm_objects():\n\n    fact(commutative, Add)\n    fact(associative, Add)\n\n    x = var()\n\n    assert run(0, True, eq_assoccomm(add(1, 2, 3), add(3, 1, 2))) == (True,)\n    assert run(0, x, eq_assoccomm(add(1, 2, 3), add(1, 2, x))) == (3,)\n    assert run(0, x, eq_assoccomm(add(1, 2, 3), add(x, 2, 1))) == (3,)\n"
  },
  {
    "path": "tests/test_constraints.py",
    "content": "from itertools import permutations\n\nfrom cons import cons\nfrom pytest import raises\nfrom unification import reify, unify, var\nfrom unification.core import _reify, stream_eval\n\nfrom kanren import conde, eq, run\nfrom kanren.constraints import (\n    ConstrainedState,\n    ConstrainedVar,\n    DisequalityStore,\n    isinstanceo,\n    neq,\n    typeo,\n)\nfrom kanren.core import lconj\nfrom kanren.goals import membero\n\n\ndef test_ConstrainedState():\n\n    a_lv, b_lv = var(), var()\n\n    ks = ConstrainedState()\n\n    assert repr(ks) == \"ConstrainedState({}, {})\"\n\n    assert ks == {}\n    assert {} == ks\n    assert not ks == {a_lv: 1}\n    assert not ks == ConstrainedState({a_lv: 1})\n\n    assert unify(1, 1, ks) is not None\n    assert unify(1, 2, ks) is False\n\n    assert unify(b_lv, a_lv, ks)\n    assert unify(a_lv, b_lv, ks)\n    assert unify(a_lv, b_lv, ks)\n\n    # Now, try that with a constraint (that's never used).\n    ks.constraints[DisequalityStore] = DisequalityStore({a_lv: {1}})\n\n    assert not ks == {a_lv: 1}\n    assert not ks == ConstrainedState({a_lv: 1})\n\n    assert unify(1, 1, ks) is not None\n    assert unify(1, 2, ks) is False\n\n    assert unify(b_lv, a_lv, ks)\n    assert unify(a_lv, b_lv, ks)\n    assert unify(a_lv, b_lv, ks)\n\n    ks = ConstrainedState(\n        {a_lv: 1}, constraints={DisequalityStore: DisequalityStore({b_lv: {1}})}\n    )\n    ks_2 = ks.copy()\n    assert ks == ks_2\n    assert ks is not ks_2\n    assert ks.constraints is not ks_2.constraints\n    assert ks.constraints[DisequalityStore] is not ks_2.constraints[DisequalityStore]\n    assert (\n        ks.constraints[DisequalityStore].lvar_constraints[b_lv]\n        == ks_2.constraints[DisequalityStore].lvar_constraints[b_lv]\n    )\n    assert (\n        ks.constraints[DisequalityStore].lvar_constraints[b_lv]\n        is not ks_2.constraints[DisequalityStore].lvar_constraints[b_lv]\n    )\n\n\ndef test_reify():\n    var_a = var(\"a\")\n\n  
  ks = ConstrainedState()\n    assert repr(ConstrainedVar(var_a, ks)) == \"~a: {}\"\n\n    de = DisequalityStore({var_a: {1, 2}})\n    ks.constraints[DisequalityStore] = de\n\n    assert repr(de) == \"ConstraintStore(neq: {~a: {1, 2}})\"\n    assert de.constraints_str(var()) == \"\"\n\n    assert repr(ConstrainedVar(var_a, ks)) == \"~a: {neq {1, 2}}\"\n\n    # TODO: Make this work with `reify` when `var('a')` isn't in `ks`.\n    assert isinstance(reify(var_a, ks), ConstrainedVar)\n    assert repr(stream_eval(_reify(var_a, ks))) == \"~a: {neq {1, 2}}\"\n\n\ndef test_ConstraintStore():\n    a_lv, b_lv = var(), var()\n    assert DisequalityStore({a_lv: {1}}) == DisequalityStore({a_lv: {1}})\n    assert DisequalityStore({a_lv: {1}}) != DisequalityStore({a_lv: {1}, b_lv: {}})\n\n    assert a_lv in DisequalityStore({a_lv: {1}})\n\n\ndef test_ConstrainedVar():\n\n    a_lv = var()\n    a_clv = ConstrainedVar(a_lv, ConstrainedState())\n\n    assert a_lv == a_clv\n    assert a_clv == a_lv\n\n    assert hash(a_lv) == hash(a_clv)\n\n    assert a_lv in {a_clv}\n    assert a_clv in {a_lv}\n\n\ndef test_disequality_basic():\n\n    a_lv, b_lv = var(), var()\n\n    ks = ConstrainedState()\n    de = DisequalityStore({a_lv: {1}})\n    ks.constraints[DisequalityStore] = de\n\n    assert unify(a_lv, 1, ks) is False\n\n    ks = unify(a_lv, b_lv, ks)\n    assert unify(b_lv, 1, ks) is False\n\n    res = list(lconj(neq({}, 1))({}))\n    assert len(res) == 1\n\n    res = list(lconj(neq(1, {}))({}))\n    assert len(res) == 1\n\n    res = list(lconj(neq({}, {}))({}))\n    assert len(res) == 0\n\n    res = list(lconj(neq(a_lv, 1))({}))\n    assert len(res) == 1\n    assert isinstance(res[0], ConstrainedState)\n    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1}\n\n    res = list(lconj(neq(1, a_lv))({}))\n    assert len(res) == 1\n    assert isinstance(res[0], ConstrainedState)\n    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1}\n\n    res = 
list(lconj(neq(a_lv, 1), neq(a_lv, 2), neq(a_lv, 1))({}))\n    assert len(res) == 1\n    assert isinstance(res[0], ConstrainedState)\n    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1, 2}\n\n    res = list(lconj(neq(a_lv, 1), eq(a_lv, 2))({}))\n    assert len(res) == 1\n    assert isinstance(res[0], ConstrainedState)\n    # The constrained variable is already ground and satisfies the constraint,\n    # so it should've been removed from the store\n    assert a_lv not in res[0].constraints[DisequalityStore].lvar_constraints\n    assert res[0][a_lv] == 2\n\n    res = list(lconj(eq(a_lv, 1), neq(a_lv, 1))({}))\n    assert res == []\n\n\ndef test_disequality():\n\n    a_lv, b_lv = var(), var()\n    q_lv, c_lv = var(), var()\n\n    goal_sets = [\n        ([neq(a_lv, 1)], 1),\n        ([neq(cons(1, a_lv), [1]), eq(a_lv, [])], 0),\n        ([neq(cons(1, a_lv), [1]), eq(a_lv, b_lv), eq(b_lv, [])], 0),\n        ([neq([1], cons(1, a_lv)), eq(a_lv, b_lv), eq(b_lv, [])], 0),\n        # TODO FIXME: This one won't work due to an ambiguity in `cons`.\n        # (\n        #     [\n        #         neq([1], cons(1, a_lv)),\n        #         eq(a_lv, b_lv),\n        #         # Both make `cons` produce a list\n        #         conde([eq(b_lv, None)], [eq(b_lv, [])]),\n        #     ],\n        #     0,\n        # ),\n        ([neq(cons(1, a_lv), [1]), eq(a_lv, b_lv), eq(b_lv, tuple())], 1),\n        ([neq([1], cons(1, a_lv)), eq(a_lv, b_lv), eq(b_lv, tuple())], 1),\n        (\n            [\n                neq([1], cons(1, a_lv)),\n                eq(a_lv, b_lv),\n                # The first should fail, the second should succeed\n                conde([eq(b_lv, [])], [eq(b_lv, tuple())]),\n            ],\n            1,\n        ),\n        ([neq(a_lv, 1), eq(a_lv, 1)], 0),\n        ([neq(a_lv, 1), eq(b_lv, 1), eq(a_lv, b_lv)], 0),\n        ([neq(a_lv, 1), eq(b_lv, 1), eq(a_lv, b_lv)], 0),\n        ([neq(a_lv, b_lv), eq(b_lv, c_lv), eq(c_lv, a_lv)], 
0),\n    ]\n\n    for i, (goal, num_results) in enumerate(goal_sets):\n        # The order of goals should not matter, so try them all\n        for goal_ord in permutations(goal):\n\n            res = list(lconj(*goal_ord)({}))\n            assert len(res) == num_results, (i, goal_ord)\n\n            res = list(lconj(*goal_ord)(ConstrainedState()))\n            assert len(res) == num_results, (i, goal_ord)\n\n            assert len(run(0, q_lv, *goal_ord)) == num_results, (i, goal_ord)\n\n\ndef test_typeo_basic():\n    a_lv, q_lv = var(), var()\n\n    assert run(0, q_lv, typeo(q_lv, int)) == (q_lv,)\n    assert run(0, q_lv, typeo(1, int)) == (q_lv,)\n    assert run(0, q_lv, typeo(1, str)) == ()\n    assert run(0, q_lv, typeo(\"hi\", str)) == (q_lv,)\n    assert run(0, q_lv, typeo([], q_lv)) == (q_lv,)\n    # Invalid second arg type (i.e. not a type)\n    assert run(0, q_lv, typeo(1, 1)) == ()\n    assert run(0, q_lv, membero(q_lv, (1, \"cat\", 2.2, \"hat\")), typeo(q_lv, str)) == (\n        \"cat\",\n        \"hat\",\n    )\n\n    with raises(ValueError):\n        run(0, q_lv, typeo(a_lv, str), typeo(a_lv, int))\n\n\ndef test_typeo():\n    a_lv, b_lv, q_lv = var(), var(), var()\n\n    goal_sets = [\n        # Logic variable instance type that's immediately ground in another\n        # goal\n        ([typeo(q_lv, int), eq(q_lv, 1)], (1,)),\n        # Use an unhashable constrained term\n        ([typeo(q_lv, list), eq(q_lv, [])], ([],)),\n        # TODO: A constraint parameter that is never ground\n        # ([typeo(a_lv, q_lv), eq(a_lv, 1)], (int,)),\n        # A non-ground, non-logic variable instance argument that changes type\n        # when ground\n        ([typeo(cons(1, a_lv), list), eq(a_lv, [])], (q_lv,)),\n        # Logic variable instance and type arguments\n        ([typeo(q_lv, int), eq(b_lv, 1), eq(b_lv, q_lv)], (1,)),\n        # The same, but with `conde`\n        (\n            [\n                typeo(q_lv, int),\n                # One succeeds, one 
fails\n                conde([eq(b_lv, 1)], [eq(b_lv, \"hi\")]),\n                eq(b_lv, q_lv),\n            ],\n            (1,),\n        ),\n        # Logic variable instance argument that's eventually grounded to a\n        # mismatched instance type through another logic variable\n        ([typeo(q_lv, int), eq(b_lv, 1.0), eq(b_lv, q_lv)], ()),\n        # Logic variable type argument that's eventually grounded to a\n        # mismatched instance type through another logic variable (i.e. both\n        # arguments are ground to `int` types)\n        ([typeo(q_lv, b_lv), eq(b_lv, int), eq(b_lv, q_lv)], ()),\n        # Logic variable type argument that's eventually grounded to a\n        # mismatched instance type through another logic variable (i.e. both\n        # arguments are ground to the value `1`, which violates the second\n        # argument type expectations)\n        ([typeo(q_lv, b_lv), eq(b_lv, 1), eq(b_lv, q_lv)], ()),\n        # Check a term that's unground by ground enough for this constraint\n        ([typeo(a_lv, tuple), eq([(b_lv,)], a_lv)], ()),\n    ]\n\n    for i, (goal, expected) in enumerate(goal_sets):\n        for goal_ord in permutations(goal):\n            res = run(0, q_lv, *goal_ord)\n            assert res == expected, (i, goal_ord)\n\n\ndef test_instanceo_basic():\n    q_lv = var()\n\n    assert run(0, q_lv, isinstanceo(q_lv, int)) == (q_lv,)\n    assert run(0, q_lv, isinstanceo(1, int)) == (q_lv,)\n    assert run(0, q_lv, isinstanceo(1, object)) == (q_lv,)\n    # NOTE: Not currently supported.\n    # assert run(0, q_lv, isinstanceo(1, (int, object))) == (q_lv,)\n    assert run(0, q_lv, isinstanceo(1, str)) == ()\n    # NOTE: Not currently supported.\n    # assert run(0, q_lv, isinstanceo(1, (str, list))) == ()\n    assert run(0, q_lv, isinstanceo(\"hi\", str)) == (q_lv,)\n    # Invalid second arg type (i.e. 
not a type)\n    assert run(0, q_lv, isinstanceo(1, 1)) == ()\n\n\ndef test_instanceo():\n    b_lv, q_lv = var(), var()\n\n    goal_sets = [\n        # Logic variable instance type that's immediately ground in another\n        # goal\n        ([isinstanceo(q_lv, list), eq(q_lv, [])], ([],)),\n        # Logic variable in the type argument that's eventually unified with\n        # a valid type for the given instance argument\n        ([isinstanceo([], q_lv), eq(q_lv, list)], (list,)),\n        # Logic variable type argument that's eventually reified to a tuple\n        # containing a valid type for the instance argument\n        # NOTE: Not currently supported.\n        # (\n        #     [isinstanceo([], q_lv), eq(q_lv, (int, b_lv)), eq(b_lv, list)],\n        #     ((int, list),),\n        # ),\n        # A non-ground, non-logic variable instance argument that changes type\n        # when ground\n        ([isinstanceo(cons(1, q_lv), list), eq(q_lv, [])], ([],)),\n        # Logic variable instance argument that's eventually grounded through\n        # another logic variable\n        ([isinstanceo(q_lv, int), eq(b_lv, 1), eq(b_lv, q_lv)], (1,)),\n        # The same, but with `conde`\n        (\n            [\n                isinstanceo(q_lv, int),\n                # One succeeds, one fails\n                conde([eq(b_lv, 1)], [eq(b_lv, \"hi\")]),\n                eq(b_lv, q_lv),\n            ],\n            (1,),\n        ),\n        # Logic variable instance argument that's eventually grounded to a\n        # mismatched instance type through another logic variable\n        ([isinstanceo(q_lv, int), eq(b_lv, 1.0), eq(b_lv, q_lv)], ()),\n        # Logic variable type argument that's eventually grounded to a\n        # mismatched instance type through another logic variable (i.e. 
both\n        # arguments are ground to `int` types)\n        ([isinstanceo(q_lv, b_lv), eq(b_lv, int), eq(b_lv, q_lv)], ()),\n        # Logic variable type argument that's eventually grounded to a\n        # mismatched instance type through another logic variable (i.e. both\n        # arguments are ground to the value `1`, which violates the second\n        # argument type expectations)\n        ([isinstanceo(q_lv, b_lv), eq(b_lv, 1), eq(b_lv, q_lv)], ()),\n        # Check a term that's unground by ground enough for this constraint\n        ([isinstanceo(q_lv, tuple), eq([(b_lv,)], q_lv)], ()),\n    ]\n\n    for i, (goal, expected) in enumerate(goal_sets):\n        for goal_ord in permutations(goal):\n            res = run(0, q_lv, *goal_ord)\n            assert res == expected, (i, goal_ord)\n"
  },
  {
    "path": "tests/test_core.py",
    "content": "from collections.abc import Iterator\nfrom itertools import count\n\nfrom cons import cons\nfrom pytest import raises\nfrom unification import var\n\nfrom kanren.core import (\n    conde,\n    eq,\n    fail,\n    ground_order,\n    ifa,\n    lall,\n    lany,\n    lconj,\n    lconj_seq,\n    ldisj,\n    ldisj_seq,\n    run,\n    succeed,\n)\n\n\ndef results(g, s=None):\n    if s is None:\n        s = dict()\n    return tuple(g(s))\n\n\ndef test_eq():\n    x = var()\n    assert tuple(eq(x, 2)({})) == ({x: 2},)\n    assert tuple(eq(x, 2)({x: 3})) == ()\n\n\ndef test_lconj_basics():\n\n    a, b = var(), var()\n    res = list(lconj(eq(1, a), eq(2, b))({}))\n    assert res == [{a: 1, b: 2}]\n\n    res = list(lconj(eq(1, a))({}))\n    assert res == [{a: 1}]\n\n    res = list(lconj_seq([])({}))\n    assert res == [{}]\n\n    res = list(lconj(eq(1, a), eq(2, a))({}))\n    assert res == []\n\n    res = list(lconj(eq(1, 2))({}))\n    assert res == []\n\n    res = list(lconj(eq(1, 1))({}))\n    assert res == [{}]\n\n    def gen():\n        for i in [succeed, succeed]:\n            yield i\n\n    res = list(lconj(gen())({}))\n    assert res == [{}]\n\n    def gen():\n        return\n\n    res = list(lconj_seq([gen()])({}))\n    assert res == []\n\n\ndef test_ldisj_basics():\n\n    a = var()\n    res = list(ldisj(eq(1, a))({}))\n    assert res == [{a: 1}]\n\n    res = list(ldisj(eq(1, 2))({}))\n    assert res == []\n\n    res = list(ldisj(eq(1, 1))({}))\n    assert res == [{}]\n\n    res = list(ldisj(eq(1, a), eq(1, a))({}))\n    assert res == [{a: 1}, {a: 1}]\n\n    res = list(ldisj(eq(1, a), eq(2, a))({}))\n    assert res == [{a: 1}, {a: 2}]\n\n    res = list(ldisj_seq([])({}))\n    assert res == [{}]\n\n    def gen():\n        for i in [succeed, succeed]:\n            yield i\n\n    res = list(ldisj(gen())({}))\n    assert res == [{}, {}]\n\n\ndef test_conde_basics():\n\n    a, b = var(), var()\n    res = list(conde([eq(1, a), eq(2, b)], [eq(1, b), eq(2, 
a)])({}))\n    assert res == [{a: 1, b: 2}, {b: 1, a: 2}]\n\n    res = list(conde([eq(1, a), eq(2, 1)], [eq(1, b), eq(2, a)])({}))\n    assert res == [{b: 1, a: 2}]\n\n    aa, ab, ba, bb, bc = var(), var(), var(), var(), var()\n    res = list(\n        conde(\n            [eq(1, a), conde([eq(11, aa)], [eq(12, ab)])],\n            [\n                eq(1, b),\n                conde([eq(111, ba), eq(112, bb)], [eq(121, bc)]),\n            ],\n        )({})\n    )\n    assert res == [\n        {a: 1, aa: 11},\n        {b: 1, ba: 111, bb: 112},\n        {a: 1, ab: 12},\n        {b: 1, bc: 121},\n    ]\n\n    res = list(conde([eq(1, 2)], [eq(1, 1)])({}))\n    assert res == [{}]\n\n    assert list(lconj(eq(1, 1))({})) == [{}]\n\n    res = list(lconj(conde([eq(1, 2)], [eq(1, 1)]))({}))\n    assert res == [{}]\n\n    res = list(lconj(conde([eq(1, 2)], [eq(1, 1)]), conde([eq(1, 2)], [eq(1, 1)]))({}))\n    assert res == [{}]\n\n\ndef test_lany():\n    x = var()\n    assert len(tuple(lany(eq(x, 2), eq(x, 3))({}))) == 2\n    assert len(tuple(lany(eq(x, 2), eq(x, 3))({}))) == 2\n\n\ndef test_lall():\n    x = var()\n    assert results(lall(eq(x, 2))) == ({x: 2},)\n    assert results(lall(eq(x, 2), eq(x, 3))) == ()\n    assert results(lall()) == ({},)\n    assert run(0, x, lall()) == (x,)\n\n\ndef test_conde():\n    x = var()\n    assert results(conde([eq(x, 2)], [eq(x, 3)])) == ({x: 2}, {x: 3})\n    assert results(conde([eq(x, 2), eq(x, 3)])) == ()\n\n    assert set(run(0, x, conde([eq(x, 2)], [eq(x, 3)]))) == {2, 3}\n    assert set(run(0, x, conde([eq(x, 2), eq(x, 3)]))) == set()\n\n    goals = ([eq(x, i)] for i in count())  # infinite number of goals\n    assert run(1, x, conde(goals)) == (0,)\n    assert run(1, x, conde(goals)) == (1,)\n\n\ndef test_short_circuit():\n    def badgoal(s):\n        raise NotImplementedError()\n\n    x = var(\"x\")\n    tuple(run(5, x, fail, badgoal))  # Does not raise exception\n\n\ndef test_run():\n    x, y, z = var(), var(), var()\n    res = 
run(None, x, eq(x, 1))\n    assert isinstance(res, Iterator)\n    assert tuple(res) == (1,)\n    assert run(1, x, eq(x, 1)) == (1,)\n    assert run(2, x, eq(x, 1)) == (1,)\n    assert run(0, x, eq(x, 1)) == (1,)\n    assert run(1, x, eq(x, (y, z)), eq(y, 3), eq(z, 4)) == ((3, 4),)\n    assert set(run(2, x, conde([eq(x, 1)], [eq(x, 2)]))) == set((1, 2))\n\n\ndef test_run_output_reify():\n    x = var()\n    assert run(0, (1, 2, x), eq(x, 3)) == ((1, 2, 3),)\n\n\ndef test_lanyseq():\n    x = var()\n    g = lany((eq(x, i) for i in range(3)))\n    assert list(g({})) == [{x: 0}, {x: 1}, {x: 2}]\n    assert list(g({})) == [{x: 0}, {x: 1}, {x: 2}]\n\n    # Test lanyseq with an infinite number of goals.\n    assert set(run(3, x, lany((eq(x, i) for i in count())))) == {0, 1, 2}\n    assert set(run(3, x, lany((eq(x, i) for i in count())))) == {0, 1, 2}\n\n\ndef test_lall_errors():\n    class SomeException(Exception):\n        pass\n\n    def bad_relation():\n        def _bad_relation(s):\n            raise SomeException(\"some exception\")\n\n        return lall(_bad_relation)\n\n    with raises(SomeException):\n        run(0, var(), bad_relation())\n\n\ndef test_dict():\n    x = var()\n    assert run(0, x, eq({1: x}, {1: 2})) == (2,)\n\n\ndef test_ifa():\n    x, y = var(), var()\n\n    assert run(0, (x, y), ifa(lall(eq(x, True), eq(y, 1)), eq(y, 2))) == ((True, 1),)\n    assert run(\n        0, y, eq(x, False), ifa(lall(eq(x, True), eq(y, 1)), lall(eq(y, 2)))\n    ) == (2,)\n    assert (\n        run(\n            0,\n            y,\n            eq(x, False),\n            ifa(lall(eq(x, True), eq(y, 1)), lall(eq(x, True), eq(y, 2))),\n        )\n        == ()\n    )\n\n    assert run(\n        0,\n        y,\n        eq(x, True),\n        ifa(lall(eq(x, True), eq(y, 1)), lall(eq(x, True), eq(y, 2))),\n    ) == (1,)\n\n\ndef test_ground_order():\n    x, y, z = var(), var(), var()\n    assert run(0, x, ground_order((y, [1, z], 1), x)) == ([1, [1, z], y],)\n    a, b, c = var(), 
var(), var()\n    assert run(0, (a, b, c), ground_order((y, [1, z], 1), (a, b, c))) == (\n        (1, [1, z], y),\n    )\n    res = run(0, z, ground_order([cons(x, y), (x, y)], z))\n    assert res == ([(x, y), cons(x, y)],)\n    res = run(0, z, ground_order([(x, y), cons(x, y)], z))\n    assert res == ([(x, y), cons(x, y)],)\n"
  },
  {
    "path": "tests/test_facts.py",
    "content": "from unification import var\n\nfrom kanren.core import conde, run\nfrom kanren.facts import Relation, fact, facts\n\n\ndef test_relation():\n    parent = Relation()\n    fact(parent, \"Homer\", \"Bart\")\n    fact(parent, \"Homer\", \"Lisa\")\n    fact(parent, \"Marge\", \"Bart\")\n    fact(parent, \"Marge\", \"Lisa\")\n    fact(parent, \"Abe\", \"Homer\")\n    fact(parent, \"Jackie\", \"Marge\")\n\n    x = var(\"x\")\n    assert set(run(5, x, parent(\"Homer\", x))) == set((\"Bart\", \"Lisa\"))\n    assert set(run(5, x, parent(x, \"Bart\"))) == set((\"Homer\", \"Marge\"))\n\n    def grandparent(x, z):\n        y = var()\n        return conde((parent(x, y), parent(y, z)))\n\n    assert set(run(5, x, grandparent(x, \"Bart\"))) == set((\"Abe\", \"Jackie\"))\n\n    foo = Relation(\"foo\")\n    assert \"foo\" in str(foo)\n\n\ndef test_fact():\n    rel = Relation()\n    fact(rel, 1, 2)\n    assert (1, 2) in rel.facts\n    assert (10, 10) not in rel.facts\n\n    facts(rel, (2, 3), (3, 4))\n    assert (2, 3) in rel.facts\n    assert (3, 4) in rel.facts\n\n\ndef test_unify_variable_with_itself_should_not_unify():\n    # Regression test for https://github.com/logpy/logpy/issues/33\n    valido = Relation()\n    fact(valido, \"a\", \"b\")\n    fact(valido, \"b\", \"a\")\n    x = var()\n    assert run(0, x, valido(x, x)) == ()\n\n\ndef test_unify_variable_with_itself_should_unify():\n    valido = Relation()\n    fact(valido, 0, 1)\n    fact(valido, 1, 0)\n    fact(valido, 1, 1)\n    x = var()\n    assert run(0, x, valido(x, x)) == (1,)\n\n\ndef test_unify_tuple():\n    # Tests that adding facts can be unified with unpacked versions of those\n    # facts.\n    valido = Relation()\n    fact(valido, (0, 1))\n    fact(valido, (1, 0))\n    fact(valido, (1, 1))\n    x = var()\n    y = var()\n    assert set(run(0, x, valido((x, y)))) == set([0, 1])\n    assert set(run(0, (x, y), valido((x, y)))) == set([(0, 1), (1, 0), (1, 1)])\n    assert run(0, x, valido((x, x))) == 
(1,)\n"
  },
  {
    "path": "tests/test_goals.py",
    "content": "import pytest\nfrom cons import cons\nfrom cons.core import ConsPair\nfrom unification import isvar, unify, var\n\nfrom kanren.core import conde, eq, run\nfrom kanren.goals import (\n    appendo,\n    conso,\n    heado,\n    itero,\n    membero,\n    nullo,\n    permuteo,\n    rembero,\n    tailo,\n)\n\n\ndef results(g, s=None):\n    if s is None:\n        s = dict()\n    return tuple(g(s))\n\n\ndef test_heado():\n    x, y, z = var(), var(), var()\n    assert (x, 1) in results(heado(x, (1, 2, 3)))[0].items()\n    assert (x, 1) in results(heado(1, (x, 2, 3)))[0].items()\n    assert results(heado(x, ())) == ()\n\n    assert run(0, x, heado(x, z), conso(1, y, z)) == (1,)\n\n\ndef test_tailo():\n    x, y, z = var(), var(), var()\n\n    assert (x, (2, 3)) in results(tailo(x, (1, 2, 3)))[0].items()\n    assert (x, ()) in results(tailo(x, (1,)))[0].items()\n    assert results(tailo(x, ())) == ()\n\n    assert run(0, y, tailo(y, z), conso(x, (1, 2), z)) == ((1, 2),)\n\n\ndef test_conso():\n    x, y, z = var(), var(), var()\n\n    assert not results(conso(x, y, ()))\n    assert results(conso(1, (2, 3), (1, 2, 3)))\n    assert results(conso(x, (2, 3), (1, 2, 3))) == ({x: 1},)\n    assert results(conso(1, (2, 3), x)) == ({x: (1, 2, 3)},)\n    assert results(conso(x, y, (1, 2, 3))) == ({x: 1, y: (2, 3)},)\n    assert results(conso(x, (2, 3), y)) == ({y: (x, 2, 3)},)\n    assert run(0, x, conso(x, y, z), eq(z, (1, 2, 3))) == (1,)\n\n    # Confirm that custom types are preserved.\n    class mytuple(tuple):\n        def __add__(self, other):\n            return type(self)(super(mytuple, self).__add__(other))\n\n    assert type(results(conso(x, mytuple((2, 3)), y))[0][y]) == mytuple\n\n\ndef test_nullo_itero():\n\n    x, y, z = var(), var(), var()\n    q_lv, a_lv = var(), var()\n\n    assert run(0, q_lv, conso(1, q_lv, [1]), nullo(q_lv))\n    assert run(0, q_lv, nullo(q_lv), conso(1, q_lv, [1]))\n\n    assert not run(0, q_lv, nullo(q_lv, [], ()))\n    assert run(0, 
[a_lv, q_lv], nullo(q_lv, a_lv, default_ConsNull=tuple)) == (\n        [(), ()],\n    )\n    assert run(0, [a_lv, q_lv], nullo(a_lv, [], q_lv)) == ([[], []],)\n\n    assert ([],) == run(0, q_lv, nullo(q_lv, []))\n    assert ([],) == run(0, q_lv, nullo([], q_lv))\n    assert (None,) == run(0, q_lv, nullo(None, q_lv))\n    assert (tuple(),) == run(0, q_lv, nullo(tuple(), q_lv))\n    assert (q_lv,) == run(0, q_lv, nullo(tuple(), tuple()))\n    assert ([],) == run(0, q_lv, nullo(var(), q_lv))\n    assert ([],) == run(0, q_lv, nullo(q_lv, var()))\n    assert ([],) == run(0, q_lv, nullo(q_lv, q_lv))\n\n    assert isvar(run(0, y, nullo([]))[0])\n    assert isvar(run(0, y, nullo(None))[0])\n    assert run(0, y, nullo(y))[0] == []\n    assert run(0, y, conso(var(), y, [1]), nullo(y))[0] == []\n    assert run(0, y, conso(var(), y, (1,)), nullo(y))[0] == ()\n\n    assert run(1, y, conso(1, x, y), itero(y))[0] == [1]\n    assert run(1, y, conso(1, x, y), conso(2, z, x), itero(y))[0] == [1, 2]\n\n    # Make sure that the remaining results end in logic variables\n    res_2 = run(2, y, conso(1, x, y), conso(2, z, x), itero(y))[1]\n    assert res_2[:2] == [1, 2]\n    assert isvar(res_2[-1])\n\n\ndef test_membero():\n    x, y = var(), var()\n\n    assert set(run(5, x, membero(x, (1, 2, 3)), membero(x, (2, 3, 4)))) == {2, 3}\n\n    assert run(5, x, membero(2, (1, x, 3))) == (2,)\n    assert run(0, x, membero(1, (1, 2, 3))) == (x,)\n    assert run(0, x, membero(1, (2, 3))) == ()\n\n    g = membero(x, (0, 1, 2))\n    assert tuple(r[x] for r in g({})) == (0, 1, 2)\n\n    def in_cons(x, y):\n        if issubclass(type(y), ConsPair):\n            return x == y.car or in_cons(x, y.cdr)\n        else:\n            return False\n\n    res = run(4, x, membero(1, x))\n    assert all(in_cons(1, r) for r in res)\n\n    res = run(4, (x, y), membero(x, y))\n    assert all(in_cons(i, r) for i, r in res)\n\n\ndef test_uneval_membero():\n    x, y = var(), var()\n    assert set(run(100, x, membero(y, 
((1, 2, 3), (4, 5, 6))), membero(x, y))) == {\n        1,\n        2,\n        3,\n        4,\n        5,\n        6,\n    }\n\n\ndef test_appendo():\n    q_lv = var()\n    assert run(0, q_lv, appendo((), (1, 2), (1, 2))) == (q_lv,)\n    assert run(0, q_lv, appendo((), (1, 2), 1)) == ()\n    assert run(0, q_lv, appendo((), (1, 2), (1,))) == ()\n    assert run(0, q_lv, appendo((1, 2), (3, 4), (1, 2, 3, 4))) == (q_lv,)\n    assert run(5, q_lv, appendo((1, 2, 3), q_lv, (1, 2, 3, 4, 5))) == ((4, 5),)\n    assert run(5, q_lv, appendo(q_lv, (4, 5), (1, 2, 3, 4, 5))) == ((1, 2, 3),)\n    assert run(5, q_lv, appendo((1, 2, 3), (4, 5), q_lv)) == ((1, 2, 3, 4, 5),)\n\n    q_lv, r_lv = var(), var()\n\n    assert ([1, 2, 3, 4],) == run(0, q_lv, appendo([1, 2], [3, 4], q_lv))\n    assert ([3, 4],) == run(0, q_lv, appendo([1, 2], q_lv, [1, 2, 3, 4]))\n    assert ([1, 2],) == run(0, q_lv, appendo(q_lv, [3, 4], [1, 2, 3, 4]))\n\n    expected_res = set(\n        [\n            ((), (1, 2, 3, 4)),\n            ((1,), (2, 3, 4)),\n            ((1, 2), (3, 4)),\n            ((1, 2, 3), (4,)),\n            ((1, 2, 3, 4), ()),\n        ]\n    )\n    assert expected_res == set(run(0, (q_lv, r_lv), appendo(q_lv, r_lv, (1, 2, 3, 4))))\n\n    res = run(3, (q_lv, r_lv), appendo(q_lv, [3, 4], r_lv))\n    assert len(res) == 3\n    assert any(len(a) > 0 and isvar(a[0]) for a, b in res)\n    assert all(a + [3, 4] == b for a, b in res)\n\n    res = run(0, (q_lv, r_lv), appendo([3, 4], q_lv, r_lv))\n    assert len(res) == 2\n    assert ([], [3, 4]) == res[0]\n    assert all(\n        type(v) == cons for v in unify((var(), cons(3, 4, var())), res[1]).values()\n    )\n\n\n@pytest.mark.skip(\"Misspecified test\")\ndef test_appendo_reorder():\n    # XXX: This test generates goal conjunctions that are non-terminating given\n    # the specified goal ordering.  
More specifically, it generates\n    # `lall(appendo(x, y, w), appendo(w, z, ()))`, for which the first\n    # `appendo` produces an infinite stream of results and the second\n    # necessarily fails for all values of the first `appendo` yielding\n    # non-empty `w` unifications.\n    #\n    # The only reason it worked before is the `EarlyGoalError`\n    # and its implicit goal reordering, which made this case an out-of-place\n    # test for a goal reordering feature that has nothing to do with `appendo`.\n    # Furthermore, the `EarlyGoalError` mechanics do *not* fix this general\n    # problem, and it's trivial to generate an equivalent situation in which\n    # an `EarlyGoalError` is never thrown.\n    #\n    # In other words, it seems like a nice side effect of `EarlyGoalError`, but\n    # it's actually a very costly approach that masks a bigger issue; one that\n    # all miniKanren programmers need to think about when developing.\n\n    x, y, z, w = var(), var(), var(), var()\n    for t in [tuple(range(i)) for i in range(5)]:\n        print(t)\n        for xi, yi in run(0, (x, y), appendo(x, y, t)):\n            assert xi + yi == t\n\n        results = run(2, (x, y, z, w), appendo(x, y, w), appendo(w, z, t))\n        for xi, yi, zi, wi in results:\n            assert xi + yi + zi == t\n\n\ndef test_rembero():\n\n    q_lv = var()\n    assert ([],) == run(0, q_lv, rembero(1, [1], q_lv))\n    assert ([], [1]) == run(0, q_lv, rembero(1, q_lv, []))\n\n    expected_res = (\n        [5, 1, 2, 3, 4],\n        [1, 5, 2, 3, 4],\n        [1, 2, 5, 3, 4],\n        [1, 2, 3, 5, 4],\n        [1, 2, 3, 4],\n        [1, 2, 3, 4, 5],\n    )\n    assert expected_res == run(0, q_lv, rembero(5, q_lv, [1, 2, 3, 4]))\n\n\ndef test_permuteo():\n\n    from itertools import permutations\n\n    a_lv = var()\n    q_lv = var()\n\n    class Blah:\n        def __hash__(self):\n            raise TypeError()\n\n    # An unhashable sequence with an unhashable object in it\n    obj_1 = 
[Blah()]\n\n    assert results(permuteo((1, 2), (2, 1))) == ({},)\n    assert results(permuteo((1, obj_1), (obj_1, 1))) == ({},)\n    assert results(permuteo([1, 2], [2, 1])) == ({},)\n    assert results(permuteo((1, 2, 2), (2, 1, 2))) == ({},)\n\n    # (1, obj_1, a_lv) == (1, obj_1, a_lv) ==> {a_lv: a_lv}\n    # (1, obj_1, a_lv) == (1, a_lv, obj_1) ==> {a_lv: obj_1}\n    # (1, obj_1, a_lv) == (a_lv, obj_1, 1) ==> {a_lv: 1}\n    assert run(0, a_lv, permuteo((1, obj_1, a_lv), (obj_1, a_lv, 1))) == (\n        1,\n        a_lv,\n        obj_1,\n    )\n\n    assert not results(permuteo((1, 2), (2, 1, 2)))\n    assert not results(permuteo((1, 2), (2, 1, 2)))\n    assert not results(permuteo((1, 2, 3), (2, 1, 2)))\n    assert not results(permuteo((1, 2, 1), (2, 1, 2)))\n    assert not results(permuteo([1, 2, 1], (2, 1, 2)))\n\n    x = var()\n    assert set(run(0, x, permuteo(x, (1, 2, 2)))) == set(\n        ((1, 2, 2), (2, 1, 2), (2, 2, 1))\n    )\n    q_lv = var()\n\n    assert run(0, q_lv, permuteo((1, 2, 3), (q_lv, 2, 1))) == (3,)\n\n    assert run(0, q_lv, permuteo([1, 2, 3], [3, 2, 1]))\n    assert run(0, q_lv, permuteo((1, 2, 3), (3, 2, 1)))\n    assert run(0, q_lv, permuteo([1, 2, 3], [2, 1])) == ()\n    assert run(0, q_lv, permuteo([1, 2, 3], (3, 2, 1))) == ()\n\n    col = [1, 2, 3]\n    exp_res = set(tuple(i) for i in permutations(col))\n\n    # The first term is ground\n    res = run(0, q_lv, permuteo(col, q_lv))\n    assert all(type(r) == type(col) for r in res)\n\n    res = set(tuple(r) for r in res)\n    assert res == exp_res\n\n    # The second term is ground\n    res = run(0, q_lv, permuteo(q_lv, col))\n    assert all(type(r) == type(col) for r in res)\n\n    res = set(tuple(r) for r in res)\n    assert res == exp_res\n\n    a_lv = var()\n    # Neither terms are ground\n    bi_res = run(5, [q_lv, a_lv], permuteo(q_lv, a_lv))\n\n    assert bi_res[0] == [[], []]\n    bi_var_1 = bi_res[1][0][0]\n    assert isvar(bi_var_1)\n    assert bi_res[1][0] == 
bi_res[1][1] == [bi_var_1]\n    bi_var_2 = bi_res[2][0][1]\n    assert isvar(bi_var_2) and bi_var_1 is not bi_var_2\n    assert bi_res[2][0] == bi_res[2][1] == [bi_var_1, bi_var_2]\n    assert bi_res[3][0] != bi_res[3][1] == [bi_var_2, bi_var_1]\n    bi_var_3 = bi_res[4][0][2]\n    assert bi_res[4][0] == bi_res[4][1] == [bi_var_1, bi_var_2, bi_var_3]\n\n    assert run(0, x, permuteo((1, 2), (1, 2), no_ident=True)) == ()\n    assert run(0, True, permuteo((1, 2), (2, 1), no_ident=True)) == (True,)\n    assert run(0, x, permuteo((), x, no_ident=True)) == ()\n    assert run(0, x, permuteo(x, (), no_ident=True)) == ()\n    assert run(0, x, permuteo((1,), x, no_ident=True)) == ()\n    assert run(0, x, permuteo(x, (1,), no_ident=True)) == ()\n    assert (1, 2, 3) not in run(0, x, permuteo((1, 2, 3), x, no_ident=True))\n    assert (1, 2, 3) not in run(0, x, permuteo(x, (1, 2, 3), no_ident=True))\n    y = var()\n    assert all(a != b for a, b in run(6, [x, y], permuteo(x, y, no_ident=True)))\n\n    def eq_permute(x, y):\n        return conde([eq(x, y)], [permuteo(a, b) for a, b in zip(x, y)])\n\n    assert run(\n        0, True, permuteo((1, (2, 3)), ((3, 2), 1), inner_eq=eq_permute, no_ident=True)\n    ) == (True,)\n"
  },
  {
    "path": "tests/test_graph.py",
    "content": "from functools import partial\nfrom math import exp, log\nfrom numbers import Real\nfrom operator import add, mul\n\nimport pytest\nimport toolz\nfrom cons import cons\nfrom etuples.core import ExpressionTuple, etuple\nfrom unification import isvar, reify, unify, var\n\nfrom kanren import conde, eq, lall, run\nfrom kanren.constraints import isinstanceo\nfrom kanren.graph import eq_length, map_anyo, mapo, reduceo, walko\n\n\nclass OrderedFunction(object):\n    def __init__(self, func):\n        self.func = func\n\n    def __call__(self, *args, **kwargs):\n        return self.func(*args, **kwargs)\n\n    @property\n    def __name__(self):\n        return self.func.__name__\n\n    def __lt__(self, other):\n        return self.__name__ < getattr(other, \"__name__\", str(other))\n\n    def __gt__(self, other):\n        return self.__name__ > getattr(other, \"__name__\", str(other))\n\n    def __repr__(self):\n        return self.__name__\n\n\nadd = OrderedFunction(add)\nmul = OrderedFunction(mul)\nlog = OrderedFunction(log)\nexp = OrderedFunction(exp)\n\n\nExpressionTuple.__lt__ = (\n    lambda self, other: self < (other,)\n    if isinstance(other, int)\n    else tuple(self) < tuple(other)\n)\nExpressionTuple.__gt__ = (\n    lambda self, other: self > (other,)\n    if isinstance(other, int)\n    else tuple(self) > tuple(other)\n)\n\n\ndef single_math_reduceo(expanded_term, reduced_term):\n    \"\"\"Construct a goal for some simple math reductions.\"\"\"\n    x_lv = var()\n    return lall(\n        isinstanceo(x_lv, Real),\n        isinstanceo(x_lv, ExpressionTuple),\n        conde(\n            [\n                eq(expanded_term, etuple(add, x_lv, x_lv)),\n                eq(reduced_term, etuple(mul, 2, x_lv)),\n            ],\n            [eq(expanded_term, etuple(log, etuple(exp, x_lv))), eq(reduced_term, x_lv)],\n        ),\n    )\n\n\nmath_reduceo = partial(reduceo, single_math_reduceo)\n\nterm_walko = partial(\n    walko,\n    rator_goal=eq,\n    
null_type=ExpressionTuple,\n    map_rel=partial(map_anyo, null_res=False),\n)\n\n\ndef test_basics():\n    x_lv = var()\n    res = unify(\n        etuple(log, etuple(exp, etuple(log, 1))), etuple(log, etuple(exp, x_lv))\n    )\n    assert res[x_lv] == etuple(log, 1)\n\n\ndef test_reduceo():\n    q_lv = var()\n\n    # Reduce/forward\n    res = run(0, q_lv, math_reduceo(etuple(log, etuple(exp, etuple(log, 1))), q_lv))\n    assert len(res) == 1\n    assert res[0] == etuple(log, 1)\n\n    res = run(\n        0,\n        q_lv,\n        math_reduceo(etuple(log, etuple(exp, etuple(log, etuple(exp, 1)))), q_lv),\n    )\n    assert res[0] == 1\n    assert res[1] == etuple(log, etuple(exp, 1))\n\n    # Expand/backward\n    res = run(3, q_lv, math_reduceo(q_lv, 1))\n    assert res[0] == etuple(log, etuple(exp, 1))\n    assert res[1] == etuple(log, etuple(exp, etuple(log, etuple(exp, 1))))\n\n\ndef test_mapo():\n    q_lv = var()\n\n    def blah(x, y):\n        return conde([eq(x, 1), eq(y, \"a\")], [eq(x, 3), eq(y, \"b\")])\n\n    assert run(0, q_lv, mapo(blah, [], q_lv)) == ([],)\n    assert run(0, q_lv, mapo(blah, [1, 2, 3], q_lv)) == ()\n    assert run(0, q_lv, mapo(blah, [1, 1, 3], q_lv)) == ([\"a\", \"a\", \"b\"],)\n    assert run(0, q_lv, mapo(blah, q_lv, [\"a\", \"a\", \"b\"])) == ([1, 1, 3],)\n\n    exp_res = (\n        [[], []],\n        [[1], [\"a\"]],\n        [[3], [\"b\"]],\n        [[1, 1], [\"a\", \"a\"]],\n        [[3, 1], [\"b\", \"a\"]],\n    )\n\n    a_lv = var()\n    res = run(5, [q_lv, a_lv], mapo(blah, q_lv, a_lv))\n    assert res == exp_res\n\n\ndef test_eq_length():\n    q_lv = var()\n\n    res = run(0, q_lv, eq_length([1, 2, 3], q_lv))\n    assert len(res) == 1 and len(res[0]) == 3 and all(isvar(q) for q in res[0])\n\n    res = run(0, q_lv, eq_length(q_lv, [1, 2, 3]))\n    assert len(res) == 1 and len(res[0]) == 3 and all(isvar(q) for q in res[0])\n\n    res = run(0, q_lv, eq_length(cons(1, q_lv), [1, 2, 3]))\n    assert len(res) == 1 and len(res[0]) 
== 2 and all(isvar(q) for q in res[0])\n\n    v_lv = var()\n    res = run(3, (q_lv, v_lv), eq_length(q_lv, v_lv, default_ConsNull=tuple))\n    assert len(res) == 3 and all(\n        isinstance(a, tuple)\n        and len(a) == len(b)\n        and (len(a) == 0 or a != b)\n        and all(isvar(r) for r in a)\n        for a, b in res\n    )\n\n\ndef test_map_anyo_types():\n    \"\"\"Make sure that `map_anyo` preserves the types between its arguments.\"\"\"\n    q_lv = var()\n    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1], q_lv))\n    assert res[0] == [1]\n    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), (1,), q_lv))\n    assert res[0] == (1,)\n    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), q_lv, (1,)))\n    assert res[0] == (1,)\n    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), q_lv, [1]))\n    assert res[0] == [1]\n    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], [1, 2]))\n    assert len(res) == 1\n    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], [1, 3]))\n    assert len(res) == 0\n    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], (1, 2)))\n    assert len(res) == 0\n\n\ndef test_map_anyo_misc():\n    q_lv = var(\"q\")\n\n    res = run(0, q_lv, map_anyo(eq, [1, 2, 3], [1, 2, 3]))\n    # TODO: Remove duplicate results\n    assert len(res) == 7\n    res = run(0, q_lv, map_anyo(eq, [1, 2, 3], [1, 3, 3]))\n    assert len(res) == 0\n\n    def one_to_threeo(x, y):\n        return conde([eq(x, 1), eq(y, 3)])\n\n    res = run(0, q_lv, map_anyo(one_to_threeo, [1, 2, 4, 1, 4, 1, 1], q_lv))\n\n    assert res[0] == [3, 2, 4, 3, 4, 3, 3]\n\n    assert (\n        len(run(4, q_lv, map_anyo(math_reduceo, [etuple(mul, 2, var(\"x\"))], q_lv))) == 0\n    )\n\n    test_res = run(4, q_lv, map_anyo(math_reduceo, [etuple(add, 2, 2), 1], q_lv))\n    assert test_res == ([etuple(mul, 2, 2), 1],)\n\n    test_res = run(4, q_lv, map_anyo(math_reduceo, [1, etuple(add, 2, 2)], q_lv))\n    assert test_res == ([1, etuple(mul, 2, 
2)],)\n\n    test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, var(\"z\")))\n    assert all(isinstance(r, list) for r in test_res)\n\n    test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, var(\"z\"), tuple))\n    assert all(isinstance(r, tuple) for r in test_res)\n\n    x, y, z = var(), var(), var()\n\n    def test_bin(a, b):\n        return conde([eq(a, 1), eq(b, 2)])\n\n    res = run(10, (x, y), map_anyo(test_bin, x, y, null_type=tuple))\n    exp_res_form = (\n        ((1,), (2,)),\n        ((x, 1), (x, 2)),\n        ((1, 1), (2, 2)),\n        ((x, y, 1), (x, y, 2)),\n        ((1, x), (2, x)),\n        ((x, 1, 1), (x, 2, 2)),\n        ((1, 1, 1), (2, 2, 2)),\n        ((x, y, z, 1), (x, y, z, 2)),\n        ((1, x, 1), (2, x, 2)),\n        ((x, 1, y), (x, 2, y)),\n    )\n\n    for a, b in zip(res, exp_res_form):\n        s = unify(a, b)\n        assert s is not False\n        assert all(isvar(i) for i in reify((x, y, z), s))\n\n\n@pytest.mark.parametrize(\n    \"test_input, test_output\",\n    [\n        ([], ()),\n        ([1], ()),\n        (\n            [\n                etuple(add, 1, 1),\n            ],\n            ([etuple(mul, 2, 1)],),\n        ),\n        ([1, etuple(add, 1, 1)], ([1, etuple(mul, 2, 1)],)),\n        ([etuple(add, 1, 1), 1], ([etuple(mul, 2, 1), 1],)),\n        (\n            [etuple(mul, 2, 1), etuple(add, 1, 1), 1],\n            ([etuple(mul, 2, 1), etuple(mul, 2, 1), 1],),\n        ),\n        (\n            [\n                etuple(add, 1, 1),\n                etuple(log, etuple(exp, 5)),\n            ],\n            (\n                [etuple(mul, 2, 1), 5],\n                [etuple(add, 1, 1), 5],\n                [etuple(mul, 2, 1), etuple(log, etuple(exp, 5))],\n            ),\n        ),\n    ],\n)\ndef test_map_anyo(test_input, test_output):\n    \"\"\"Test `map_anyo` with fully ground terms (i.e. 
no logic variables).\"\"\"\n    q_lv = var()\n    test_res = run(\n        0,\n        q_lv,\n        map_anyo(math_reduceo, test_input, q_lv),\n    )\n\n    assert len(test_res) == len(test_output)\n\n    test_res = sorted(test_res)\n    test_output = sorted(test_output)\n    # Make sure the first result matches.\n    # TODO: This is fairly implementation-specific (i.e. dependent on the order\n    # in which `condeseq` returns results).\n    if len(test_output) > 0:\n        assert test_res[0] == test_output[0]\n\n    # Make sure all the results match.\n    # TODO: If we want to avoid fixing the output order, convert the lists to\n    # tuples and add everything to a set, then compare.\n    assert test_res == test_output\n\n\ndef test_map_anyo_reverse():\n    \"\"\"Test `map_anyo` in \"reverse\" (i.e. specify the reduced form and generate the un-reduced form).\"\"\"  # noqa: E501\n    # Unbounded reverse\n    q_lv = var()\n    rev_input = [etuple(mul, 2, 1)]\n    test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, rev_input))\n    assert test_res == (\n        [etuple(add, 1, 1)],\n        [etuple(log, etuple(exp, etuple(add, 1, 1)))],\n        # [etuple(log, etuple(exp, etuple(mul, 2, 1)))],\n        [etuple(log, etuple(exp, etuple(log, etuple(exp, etuple(add, 1, 1)))))],\n        # [etuple(log, etuple(exp, etuple(log, etuple(exp, etuple(mul, 2, 1)))))],\n        [\n            etuple(\n                log,\n                etuple(\n                    exp,\n                    etuple(\n                        log, etuple(exp, etuple(log, etuple(exp, etuple(add, 1, 1))))\n                    ),\n                ),\n            )\n        ],\n    )\n\n    # Guided reverse\n    test_res = run(\n        4,\n        q_lv,\n        map_anyo(math_reduceo, [etuple(add, q_lv, 1)], [etuple(mul, 2, 1)]),\n    )\n\n    assert test_res == (1,)\n\n\ndef test_walko_misc():\n    q_lv = var(prefix=\"q\")\n\n    expr = etuple(add, etuple(mul, 2, 1), etuple(add, 1, 1))\n    res = 
run(0, q_lv, walko(eq, expr, expr))\n    # TODO: Remove duplicates\n    assert len(res) == 162\n\n    expr2 = etuple(add, etuple(mul, 2, 1), etuple(add, 2, 1))\n    res = run(0, q_lv, walko(eq, expr, expr2))\n    assert len(res) == 0\n\n    def one_to_threeo(x, y):\n        return conde([eq(x, 1), eq(y, 3)])\n\n    res = run(\n        1,\n        q_lv,\n        walko(\n            one_to_threeo,\n            [1, [1, 2, 4], 2, [[4, 1, 1]], 1],\n            q_lv,\n        ),\n    )\n    assert res == ([3, [3, 2, 4], 2, [[4, 3, 3]], 3],)\n\n    assert run(2, q_lv, walko(eq, q_lv, q_lv, null_type=ExpressionTuple)) == (\n        q_lv,\n        etuple(),\n    )\n\n    res = run(\n        1,\n        q_lv,\n        walko(\n            one_to_threeo,\n            etuple(\n                add,\n                1,\n                etuple(mul, etuple(add, 1, 2), 1),\n                etuple(add, etuple(add, 1, 2), 2),\n            ),\n            q_lv,\n            # Only descend into `add` terms\n            rator_goal=lambda x, y: lall(eq(x, add), eq(y, add)),\n        ),\n    )\n\n    assert res == (\n        etuple(\n            add, 3, etuple(mul, etuple(add, 1, 2), 1), etuple(add, etuple(add, 3, 2), 2)\n        ),\n    )\n\n\n@pytest.mark.parametrize(\n    \"test_input, test_output\",\n    [\n        (1, ()),\n        (etuple(add, 1, 1), (etuple(mul, 2, 1),)),\n        (\n            # (2 * 1) + (1 + 1)\n            etuple(add, etuple(mul, 2, 1), etuple(add, 1, 1)),\n            (\n                # 2 * (2 * 1)\n                etuple(mul, 2, etuple(mul, 2, 1)),\n                # (2 * 1) + (2 * 1)\n                etuple(add, etuple(mul, 2, 1), etuple(mul, 2, 1)),\n            ),\n        ),\n        (\n            # (log(exp(2)) * 1) + (1 + 1)\n            etuple(add, etuple(mul, etuple(log, etuple(exp, 2)), 1), etuple(add, 1, 1)),\n            (\n                # 2 * (2 * 1)\n                etuple(mul, 2, etuple(mul, 2, 1)),\n                # (2 * 1) + (2 * 1)\n    
            etuple(add, etuple(mul, 2, 1), etuple(mul, 2, 1)),\n                # (log(exp(2)) * 1) + (2 * 1)\n                etuple(\n                    add, etuple(mul, etuple(log, etuple(exp, 2)), 1), etuple(mul, 2, 1)\n                ),\n                etuple(add, etuple(mul, 2, 1), etuple(add, 1, 1)),\n            ),\n        ),\n    ],\n)\ndef test_walko(test_input, test_output):\n    \"\"\"Test `walko` with fully ground terms (i.e. no logic variables).\"\"\"\n\n    q_lv = var()\n    term_walko_fp = partial(reduceo, partial(term_walko, single_math_reduceo))\n    test_res = run(\n        len(test_output),\n        q_lv,\n        term_walko_fp(test_input, q_lv),\n        results_filter=toolz.unique,\n    )\n\n    assert len(test_res) == len(test_output)\n\n    test_res = sorted(test_res)\n    test_output = sorted(test_output)\n\n    # Make sure the first result matches.\n    if len(test_output) > 0:\n        assert test_res[0] == test_output[0]\n\n    # Make sure all the results match.\n    assert set(test_res) == set(test_output)\n\n\ndef test_walko_reverse():\n    \"\"\"Test `walko` in \"reverse\" (i.e. 
specify the reduced form and generate the un-reduced form).\"\"\"  # noqa: E501\n    q_lv = var(\"q\")\n\n    test_res = run(2, q_lv, term_walko(math_reduceo, q_lv, 5))\n    assert test_res == (\n        etuple(log, etuple(exp, 5)),\n        etuple(log, etuple(exp, etuple(log, etuple(exp, 5)))),\n    )\n    assert all(e.eval_obj == 5.0 for e in test_res)\n\n    # Make sure we get some variety in the results\n    test_res = run(2, q_lv, term_walko(math_reduceo, q_lv, etuple(mul, 2, 5)))\n    assert test_res == (\n        # Expansion of the term's root\n        etuple(add, 5, 5),\n        # Expansion in the term's arguments\n        etuple(mul, etuple(log, etuple(exp, 2)), etuple(log, etuple(exp, 5))),\n        # Two step expansion at the root\n        # etuple(log, etuple(exp, etuple(add, 5, 5))),\n        # Expansion into a sub-term\n        # etuple(mul, 2, etuple(log, etuple(exp, 5)))\n    )\n    assert all(e.eval_obj == 10.0 for e in test_res)\n\n    r_lv = var(\"r\")\n    test_res = run(4, [q_lv, r_lv], term_walko(math_reduceo, q_lv, r_lv))\n    expect_res = (\n        [etuple(add, 1, 1), etuple(mul, 2, 1)],\n        [etuple(log, etuple(exp, etuple(add, 1, 1))), etuple(mul, 2, 1)],\n        [etuple(), etuple()],\n        [\n            etuple(add, etuple(mul, 2, 1), etuple(add, 1, 1)),\n            etuple(mul, 2, etuple(mul, 2, 1)),\n        ],\n    )\n    assert list(\n        unify(a1, a2) and unify(b1, b2)\n        for [a1, b1], [a2, b2] in zip(test_res, expect_res)\n    )\n"
  },
  {
    "path": "tests/test_sudoku.py",
    "content": "\"\"\"\nBased off\nhttps://github.com/holtchesley/embedded-logic/blob/master/kanren/sudoku.ipynb\n\"\"\"\nimport pytest\nfrom unification import var\n\nfrom kanren import run\nfrom kanren.core import lall\nfrom kanren.goals import permuteq\n\n\nDIGITS = tuple(range(1, 10))\n\n\ndef get_rows(board):\n    return tuple(board[i : i + 9] for i in range(0, len(board), 9))\n\n\ndef get_columns(rows):\n    return tuple(tuple(x[i] for x in rows) for i in range(0, 9))\n\n\ndef get_square(rows, x, y):\n    return tuple(rows[xi][yi] for xi in range(x, x + 3) for yi in range(y, y + 3))\n\n\ndef get_squares(rows):\n    return tuple(get_square(rows, x, y) for x in range(0, 9, 3) for y in range(0, 9, 3))\n\n\ndef vars(hints):\n    def helper(h):\n        if h in DIGITS:\n            return h\n        else:\n            return var()\n\n    return tuple(helper(x) for x in hints)\n\n\ndef all_numbers(coll):\n    return permuteq(coll, DIGITS)\n\n\ndef sudoku_solver(hints):\n    variables = vars(hints)\n    rows = get_rows(variables)\n    cols = get_columns(rows)\n    sqs = get_squares(rows)\n    return run(\n        1,\n        variables,\n        lall(*(all_numbers(r) for r in rows)),\n        lall(*(all_numbers(c) for c in cols)),\n        lall(*(all_numbers(s) for s in sqs)),\n    )\n\n\n# fmt: off\ndef test_missing_one_entry():\n    example_board = (\n        5, 3, 4, 6, 7, 8, 9, 1, 2,\n        6, 7, 2, 1, 9, 5, 3, 4, 8,\n        1, 9, 8, 3, 4, 2, 5, 6, 7,\n        8, 5, 9, 7, 6, 1, 4, 2, 3,\n        4, 2, 6, 8, 5, 3, 7, 9, 1,\n        7, 1, 3, 9, 2, 4, 8, 5, 6,\n        9, 6, 1, 5, 3, 7, 2, 8, 4,\n        2, 8, 7, 4, 1, 9, 6, 3, 5,\n        3, 4, 5, 2, 8, 6, 0, 7, 9,\n    )\n    expected_solution = (\n        5, 3, 4, 6, 7, 8, 9, 1, 2,\n        6, 7, 2, 1, 9, 5, 3, 4, 8,\n        1, 9, 8, 3, 4, 2, 5, 6, 7,\n        8, 5, 9, 7, 6, 1, 4, 2, 3,\n        4, 2, 6, 8, 5, 3, 7, 9, 1,\n        7, 1, 3, 9, 2, 4, 8, 5, 6,\n        9, 6, 1, 5, 3, 7, 2, 8, 4,\n        2, 8, 
7, 4, 1, 9, 6, 3, 5,\n        3, 4, 5, 2, 8, 6, 1, 7, 9,\n    )\n    assert sudoku_solver(example_board)[0] == expected_solution\n\n\n# fmt: off\ndef test_missing_complex_board():\n    example_board = (\n        5, 3, 4, 6, 7, 8, 9, 0, 2,\n        6, 7, 2, 0, 9, 5, 3, 4, 8,\n        0, 9, 8, 3, 4, 2, 5, 6, 7,\n        8, 5, 9, 7, 6, 0, 4, 2, 3,\n        4, 2, 6, 8, 5, 3, 7, 9, 0,\n        7, 0, 3, 9, 2, 4, 8, 5, 6,\n        9, 6, 0, 5, 3, 7, 2, 8, 4,\n        2, 8, 7, 4, 0, 9, 6, 3, 5,\n        3, 4, 5, 2, 8, 6, 0, 7, 9,\n    )\n    expected_solution = (\n        5, 3, 4, 6, 7, 8, 9, 1, 2,\n        6, 7, 2, 1, 9, 5, 3, 4, 8,\n        1, 9, 8, 3, 4, 2, 5, 6, 7,\n        8, 5, 9, 7, 6, 1, 4, 2, 3,\n        4, 2, 6, 8, 5, 3, 7, 9, 1,\n        7, 1, 3, 9, 2, 4, 8, 5, 6,\n        9, 6, 1, 5, 3, 7, 2, 8, 4,\n        2, 8, 7, 4, 1, 9, 6, 3, 5,\n        3, 4, 5, 2, 8, 6, 1, 7, 9,\n    )\n    assert sudoku_solver(example_board)[0] == expected_solution\n\n\n# fmt: off\ndef test_unsolvable():\n    example_board = (\n        5, 3, 4, 6, 7, 8, 9, 1, 2,\n        6, 7, 2, 1, 9, 5, 9, 4, 8,  # Note column 7 has two 9's.\n        1, 9, 8, 3, 4, 2, 5, 6, 7,\n        8, 5, 9, 7, 6, 1, 4, 2, 3,\n        4, 2, 6, 8, 5, 3, 7, 9, 1,\n        7, 1, 3, 9, 2, 4, 8, 5, 6,\n        9, 6, 1, 5, 3, 7, 2, 8, 4,\n        2, 8, 7, 4, 1, 9, 6, 3, 5,\n        3, 4, 5, 2, 8, 6, 0, 7, 9,\n    )\n    assert sudoku_solver(example_board) == ()\n\n\n# fmt: off\n@pytest.mark.skip(reason=\"Currently too slow!\")\ndef test_many_missing_elements():\n    example_board = (\n        5, 3, 0, 0, 7, 0, 0, 0, 0,\n        6, 0, 0, 1, 9, 5, 0, 0, 0,\n        0, 9, 8, 0, 0, 0, 0, 6, 0,\n        8, 0, 0, 0, 6, 0, 0, 0, 3,\n        4, 0, 0, 8, 0, 3, 0, 0, 1,\n        7, 0, 0, 0, 2, 0, 0, 0, 6,\n        0, 6, 0, 0, 0, 0, 2, 8, 0,\n        0, 0, 0, 4, 1, 9, 0, 0, 5,\n        0, 0, 0, 0, 8, 0, 0, 7, 9\n    )\n    assert sudoku_solver(example_board)[0] == (\n        5, 3, 4, 6, 7, 8, 9, 1, 2,\n        6, 7, 2, 1, 9, 5, 3, 
4, 8,\n        1, 9, 8, 3, 4, 2, 5, 6, 7,\n        8, 5, 9, 7, 6, 1, 4, 2, 3,\n        4, 2, 6, 8, 5, 3, 7, 9, 1,\n        7, 1, 3, 9, 2, 4, 8, 5, 6,\n        9, 6, 1, 5, 3, 7, 2, 8, 4,\n        2, 8, 7, 4, 1, 9, 6, 3, 5,\n        3, 4, 5, 2, 8, 6, 1, 7, 9\n    )\n\n\n# fmt: off\n@pytest.mark.skip(reason=\"Currently too slow!\")\ndef test_websudoku_easy():\n    # A sudoku from websudoku.com.\n    example_board = (\n        0, 0, 8, 0, 0, 6, 0, 0, 0,\n        0, 0, 4, 3, 7, 9, 8, 0, 0,\n        5, 7, 0, 0, 1, 0, 3, 2, 0,\n        0, 5, 2, 0, 0, 7, 0, 0, 0,\n        0, 6, 0, 5, 9, 8, 0, 4, 0,\n        0, 0, 0, 4, 0, 0, 5, 7, 0,\n        0, 2, 1, 0, 4, 0, 0, 9, 8,\n        0, 0, 9, 6, 2, 3, 1, 0, 0,\n        0, 0, 0, 9, 0, 0, 7, 0, 0,\n    )\n    assert sudoku_solver(example_board) == (\n        9, 3, 8, 2, 5, 6, 4, 1, 7,\n        2, 1, 4, 3, 7, 9, 8, 6, 5,\n        5, 7, 6, 8, 1, 4, 3, 2, 9,\n        4, 5, 2, 1, 3, 7, 9, 8, 6,\n        1, 6, 7, 5, 9, 8, 2, 4, 3,\n        8, 9, 3, 4, 6, 2, 5, 7, 1,\n        3, 2, 1, 7, 4, 5, 6, 9, 8,\n        7, 8, 9, 6, 2, 3, 1, 5, 4,\n        6, 4, 5, 9, 8, 1, 7, 3, 2\n    )\n"
  },
  {
    "path": "tests/test_term.py",
    "content": "from cons import cons\nfrom etuples import etuple\nfrom unification import reify, unify, var\n\nfrom kanren.core import run\nfrom kanren.term import applyo, arguments, operator, term, unifiable_with_term\n\n\n@unifiable_with_term\nclass Node(object):\n    def __init__(self, op, args):\n        self.op = op\n        self.args = args\n\n    def __eq__(self, other):\n        return (\n            type(self) == type(other)\n            and self.op == other.op\n            and self.args == other.args\n        )\n\n    def __hash__(self):\n        return hash((type(self), self.op, self.args))\n\n    def __str__(self):\n        return \"%s(%s)\" % (self.op.name, \", \".join(map(str, self.args)))\n\n    __repr__ = __str__\n\n\nclass Operator(object):\n    def __init__(self, name):\n        self.name = name\n\n\nAdd = Operator(\"add\")\nMul = Operator(\"mul\")\n\n\ndef add(*args):\n    return Node(Add, args)\n\n\ndef mul(*args):\n    return Node(Mul, args)\n\n\nclass Op(object):\n    def __init__(self, name):\n        self.name = name\n\n\n@arguments.register(Node)\ndef arguments_Node(t):\n    return t.args\n\n\n@operator.register(Node)\ndef operator_Node(t):\n    return t.op\n\n\n@term.register(Operator, (list, tuple))\ndef term_Op(op, args):\n    return Node(op, args)\n\n\ndef test_applyo():\n    x = var()\n    assert run(0, x, applyo(\"add\", (1, 2, 3), x)) == ((\"add\", 1, 2, 3),)\n    assert run(0, x, applyo(x, (1, 2, 3), (\"add\", 1, 2, 3))) == (\"add\",)\n    assert run(0, x, applyo(\"add\", x, (\"add\", 1, 2, 3))) == ((1, 2, 3),)\n\n    a_lv, b_lv, c_lv = var(), var(), var()\n\n    from operator import add\n\n    assert run(0, c_lv, applyo(add, (1, 2), c_lv)) == (3,)\n    assert run(0, c_lv, applyo(add, etuple(1, 2), c_lv)) == (3,)\n    assert run(0, c_lv, applyo(add, a_lv, c_lv)) == (cons(add, a_lv),)\n\n    for obj in (\n        (1, 2, 3),\n        (add, 1, 2),\n        [1, 2, 3],\n        [add, 1, 2],\n        etuple(1, 2, 3),\n        etuple(add, 1, 2),\n    ):\n        o_rator, o_rands = operator(obj), arguments(obj)\n        assert run(0, a_lv, applyo(o_rator, o_rands, a_lv)) == (term(o_rator, o_rands),)\n        # Just acts like `conso` here\n        assert run(0, a_lv, applyo(o_rator, a_lv, obj)) == (arguments(obj),)\n        assert run(0, a_lv, applyo(a_lv, o_rands, obj)) == (operator(obj),)\n\n    # Just acts like `conso` here, too\n    assert run(0, c_lv, applyo(a_lv, b_lv, c_lv)) == (cons(a_lv, b_lv),)\n\n    # with pytest.raises(ConsError):\n    assert run(0, a_lv, applyo(a_lv, b_lv, object())) == ()\n    assert run(0, a_lv, applyo(1, 2, a_lv)) == ()\n\n\ndef test_applyo_object():\n    x = var()\n    assert run(0, x, applyo(Add, (1, 2, 3), x)) == (add(1, 2, 3),)\n    assert run(0, x, applyo(x, (1, 2, 3), add(1, 2, 3))) == (Add,)\n    assert run(0, x, applyo(Add, x, add(1, 2, 3))) == ((1, 2, 3),)\n\n\ndef test_unifiable_with_term():\n    add = Operator(\"add\")\n    t = Node(add, (1, 2))\n\n    assert arguments(t) == (1, 2)\n    assert operator(t) == add\n    assert term(operator(t), arguments(t)) == t\n\n    x = var()\n    s = unify(Node(add, (1, x)), Node(add, (1, 2)), {})\n\n    assert s == {x: 2}\n    assert reify(Node(add, (1, x)), s) == Node(add, (1, 2))\n"
  },
  {
    "path": "tests/test_util.py",
    "content": "from pytest import raises\n\nfrom kanren.util import (\n    FlexibleSet,\n    dicthash,\n    groupsizes,\n    hashable,\n    intersection,\n    multihash,\n    unique,\n)\n\n\ndef test_hashable():\n    assert hashable(2)\n    assert hashable((2, 3))\n    assert not hashable({1: 2})\n    assert not hashable((1, {2: 3}))\n\n\ndef test_unique():\n    assert tuple(unique((1, 2, 3))) == (1, 2, 3)\n    assert tuple(unique((1, 2, 1, 3))) == (1, 2, 3)\n\n\ndef test_unique_dict():\n    assert tuple(unique(({1: 2}, {2: 3}), key=dicthash)) == ({1: 2}, {2: 3})\n    assert tuple(unique(({1: 2}, {1: 2}), key=dicthash)) == ({1: 2},)\n\n\ndef test_unique_not_hashable():\n    assert tuple(unique(([1], [1])))\n\n\ndef test_multihash():\n    inputs = 2, (1, 2), [1, 2], {1: 2}, (1, [2]), slice(1, 2)\n    assert all(isinstance(multihash(i), int) for i in inputs)\n\n\ndef test_intersection():\n    a, b, c = (1, 2, 3, 4), (2, 3, 4, 5), (3, 4, 5, 6)\n\n    assert tuple(intersection(a, b, c)) == (3, 4)\n\n\ndef test_groupsizes():\n    assert set(groupsizes(4, 2)) == set(((1, 3), (2, 2), (3, 1)))\n    assert set(groupsizes(5, 2)) == set(((1, 4), (2, 3), (3, 2), (4, 1)))\n    assert set(groupsizes(4, 1)) == set([(4,)])\n    assert set(groupsizes(4, 4)) == set([(1, 1, 1, 1)])\n\n\ndef test_flexibleset():\n\n    test_set = set([1, 2, 4])\n    test_fs = FlexibleSet([1, 2, 4])\n\n    assert test_fs.set == test_set\n    assert test_fs.list == []\n\n    test_fs.discard(3)\n    test_set.discard(3)\n\n    assert test_fs == test_set\n\n    test_fs.discard(2)\n    test_set.discard(2)\n\n    with raises(KeyError):\n        test_set.remove(3)\n    with raises(KeyError):\n        test_fs.remove(3)\n\n    res_fs = test_fs.pop()\n    res_set = test_set.pop()\n\n    assert res_fs == res_set and test_fs == test_set\n\n    test_fs_2 = FlexibleSet([1, 2, [3, 4], {\"a\"}])\n    assert len(test_fs_2) == 4\n    assert test_fs_2.set == {1, 2}\n    assert test_fs_2.list == [[3, 4], {\"a\"}]\n\n    test_fs_2.add(2)\n    test_fs_2.add([3, 4])\n    test_fs_2.add({\"a\"})\n    assert test_fs_2.set == {1, 2}\n    assert test_fs_2.list == [[3, 4], {\"a\"}]\n\n    assert 1 in test_fs_2\n    assert {\"a\"} in test_fs_2\n    assert [3, 4] in test_fs_2\n\n    assert test_fs_2 != test_set\n\n    test_fs_2.discard(3)\n    test_fs_2.discard([3, 4])\n\n    assert test_fs_2.set == {1, 2}\n    assert test_fs_2.list == [{\"a\"}]\n\n    with raises(KeyError):\n        test_fs_2.remove(3)\n    with raises(KeyError):\n        test_fs_2.remove([1, 4])\n\n    test_fs_2.remove({\"a\"})\n\n    assert test_fs_2.set == {1, 2}\n    assert test_fs_2.list == []\n\n    test_fs_2.add([5])\n    pop_var = test_fs_2.pop()\n    assert pop_var not in test_fs_2.set\n    assert test_fs_2.list == [[5]]\n    pop_var = test_fs_2.pop()\n    assert test_fs_2.set == set()\n    assert test_fs_2.list == [[5]]\n    assert [5] == test_fs_2.pop()\n    assert test_fs_2.set == set()\n    assert test_fs_2.list == []\n\n    with raises(KeyError):\n        test_fs_2.pop()\n\n    assert FlexibleSet([1, 2, [3, 4], {\"a\"}]) == FlexibleSet([1, 2, [3, 4], {\"a\"}])\n    assert FlexibleSet([1, 2, [3, 4], {\"a\"}]) != FlexibleSet([1, [3, 4], {\"a\"}])\n\n    test_fs_3 = FlexibleSet([1, 2, [3, 4], {\"a\"}])\n    test_fs_3.clear()\n    assert test_fs_3.set == set()\n    assert test_fs_3.list == list()\n\n    test_fs_3 = FlexibleSet([1, 2, [3, 4], {\"a\"}])\n    assert repr(test_fs_3) == \"FlexibleSet([1, 2, [3, 4], {'a'}])\"\n"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\ninstall_command = pip install {opts} {packages}\nenvlist = py35,pypy35,lint\nindexserver =\n    default = https://pypi.python.org/simple\n\n[testenv]\nusedevelop = True\ncommands =\n    rm -f .coverage\n    py.test --cov=kanren -vv {posargs:kanren}\ndeps =\n    -r{toxinidir}/requirements.txt\n    coverage\n    nose\n    pytest\n    pytest-cov\nwhitelist_externals =\n    rm\n\n[testenv:lint]\ndeps =\n    flake8\ncommands =\n    flake8 kanren\nbasepython = python3.5\n\n[testenv:yapf]\n# Tox target for autoformatting the code for pep8.\ndeps =\n    yapf\ncommands =\n    yapf --recursive kanren --in-place\nbasepython = python3.5\n\n[flake8]\nignore = E731,F811,E712,E127,E126,C901,W503,W504\n"
  }
]