Showing preview only (200K chars total). Download the full file or copy to clipboard to get everything.
Repository: pythological/kanren
Branch: main
Commit: b1ff6cf07312
Files: 48
Total size: 188.2 KB
Directory structure:
gitextract_gnvw45a6/
├── .gitattributes
├── .github/
│ ├── FUNDING.yml
│ └── workflows/
│ ├── pypi.yml
│ └── tests.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .pylintrc
├── LICENSE.txt
├── MANIFEST.in
├── Makefile
├── README.md
├── doc/
│ ├── basic.md
│ └── graphs.md
├── examples/
│ ├── __init__.py
│ ├── account.py
│ ├── commutative.py
│ ├── corleone.py
│ ├── data/
│ │ ├── adjacent-states.txt
│ │ └── coastal-states.txt
│ ├── states.py
│ ├── user_classes.py
│ └── zebra-puzzle.py
├── kanren/
│ ├── __init__.py
│ ├── assoccomm.py
│ ├── constraints.py
│ ├── core.py
│ ├── facts.py
│ ├── goals.py
│ ├── graph.py
│ ├── py.typed
│ ├── term.py
│ └── util.py
├── pyproject.toml
├── pytest.ini
├── release-notes
├── requirements.txt
├── setup.cfg
├── tests/
│ ├── __init__.py
│ ├── test_assoccomm.py
│ ├── test_constraints.py
│ ├── test_core.py
│ ├── test_facts.py
│ ├── test_goals.py
│ ├── test_graph.py
│ ├── test_sudoku.py
│ ├── test_term.py
│ └── test_util.py
└── tox.ini
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitattributes
================================================
kanren/_version.py export-subst
================================================
FILE: .github/FUNDING.yml
================================================
github: [brandonwillard]
================================================
FILE: .github/workflows/pypi.yml
================================================
# GitHub Actions workflow: builds the sdist and wheel on pushes/PRs, and
# uploads them to PyPI when a GitHub release is published.
name: PyPI
on:
push:
branches:
- main
- auto-release
pull_request:
branches: [main]
release:
types: [published]
# Cancels all previous workflow runs for pull requests that have not completed.
concurrency:
# The concurrency group contains the workflow name and the branch name for pull requests
# or the commit hash for any other events.
group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
cancel-in-progress: true
jobs:
build:
name: Build distributions
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
# Full git history; presumably required so the package version can be
# derived from git metadata (kanren/_version.py is versioneer-managed
# per .gitattributes) -- TODO confirm.
fetch-depth: 0
- uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Build distributions
run: |
pip install build
python -m build
# Smoke-test the sdist by installing it into a clean virtual environment.
# NOTE(review): the step name mentions imports, but only `pip install` is
# run -- no actual `python -c "import kanren"` check is performed.
- name: Check the sdist installs and imports
run: |
mkdir -p test-sdist
cd test-sdist
python -m venv venv-sdist
venv-sdist/bin/python -m pip install ../dist/minikanren-*.tar.gz
# Same smoke test for the wheel (same caveat: install only, no import).
- name: Check the wheel installs and imports
run: |
mkdir -p test-wheel
cd test-wheel
python -m venv venv-wheel
venv-wheel/bin/python -m pip install ../dist/minikanren-*.whl
# Persist the built distributions so the upload job can fetch them.
- uses: actions/upload-artifact@v4
with:
name: artifact
path: dist/*
upload_pypi:
name: Upload to PyPI on release
needs: [build]
runs-on: ubuntu-latest
# Publish only for an actual published-release event (not pushes or PRs).
if: github.event_name == 'release' && github.event.action == 'published'
steps:
- uses: actions/download-artifact@v4
with:
name: artifact
path: dist
- uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.pypi_secret }}
================================================
FILE: .github/workflows/tests.yml
================================================
# GitHub Actions workflow: runs style checks and the pytest matrix, but only
# when files relevant to the package actually changed (see the `changes` job).
name: Tests
on:
push:
branches:
- main
pull_request:
branches:
- main
# Cancels all previous workflow runs for pull requests that have not completed.
concurrency:
# The concurrency group contains the workflow name and the branch name for pull requests
# or the commit hash for any other events.
group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
cancel-in-progress: true
jobs:
# Gate job: exposes `changes == 'true'` when any source-relevant path changed;
# every downstream job conditions on this output.
changes:
name: "Check for changes"
runs-on: ubuntu-latest
outputs:
changes: ${{ steps.changes.outputs.src }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: dorny/paths-filter@v3
id: changes
with:
# `filters` is a block scalar, so the &python anchor / *python alias
# below are parsed by dorny/paths-filter's own YAML loader, not by the
# workflow parser; `src` is a superset of the Python-file filter.
filters: |
python: &python
- 'kanren/**/*.py'
- 'tests/**/*.py'
- '*.py'
src:
- *python
- '.github/**/*.yml'
- 'setup.cfg'
- 'requirements.txt'
- '.coveragerc'
- '.pre-commit-config.yaml'
# Runs the pre-commit hooks (see .pre-commit-config.yaml) as a style gate.
style:
name: Check code style
needs: changes
runs-on: ubuntu-latest
if: ${{ needs.changes.outputs.changes == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.10"
- uses: pre-commit/action@v3.0.1
# Test matrix across CPython 3.9-3.12 and PyPy 3.9; only runs when sources
# changed and the style job succeeded.
test:
needs:
- changes
- style
runs-on: ubuntu-latest
if: ${{ needs.changes.outputs.changes == 'true' && needs.style.result == 'success' }}
strategy:
matrix:
python-version:
- "3.9"
- "3.10"
- "3.11"
- "3.12"
- "pypy3.9"
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Test with pytest
run: |
pytest -v tests/ --cov=kanren --cov-report=xml:./coverage.xml
# Each matrix entry uploads partial coverage; `parallel: true` defers the
# Coveralls merge until the upload-coverage job finishes the parallel run.
- name: Coveralls
uses: AndreMiras/coveralls-python-action@develop
with:
parallel: true
flag-name: run-${{ matrix.python-version }}
# Aggregation job for branch protection: always runs, and fails only when
# sources changed but style or the test matrix did not succeed.
all-checks:
if: ${{ always() }}
runs-on: ubuntu-latest
name: "All tests"
needs: [changes, style, test]
steps:
- name: Check build matrix status
if: ${{ needs.changes.outputs.changes == 'true' && (needs.style.result != 'success' || needs.test.result != 'success') }}
run: exit 1
# Signals Coveralls that all parallel uploads from the matrix are complete.
upload-coverage:
name: "Upload coverage"
needs: [changes, all-checks]
if: ${{ needs.changes.outputs.changes == 'true' && needs.all-checks.result == 'success' }}
runs-on: ubuntu-latest
steps:
- name: Coveralls Finished
uses: AndreMiras/coveralls-python-action@develop
with:
parallel-finished: true
================================================
FILE: .gitignore
================================================
# Created by https://www.gitignore.io/api/vim,emacs,python
# Edit at https://www.gitignore.io/?templates=vim,emacs,python
### Emacs ###
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
# directory configuration
.dir-locals.el
# network security
/network-security.data
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
testing-report.html
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
### Vim ###
# Swap
[._]*.s[a-v][a-z]
[._]*.sw[a-p]
[._]s[a-rt-v][a-z]
[._]ss[a-gi-z]
[._]sw[a-p]
# Session
Session.vim
Sessionx.vim
# Temporary
.netrwhist
# Auto-generated tag files
tags
# Persistent undo
[._]*.un~
# End of https://www.gitignore.io/api/vim,emacs,python
================================================
FILE: .pre-commit-config.yaml
================================================
# pre-commit hook configuration (run via `pre-commit install --install-hooks`
# locally and by the `style` job in .github/workflows/tests.yml).
# Global exclude: generated/vendored files that no hook should touch.
exclude: |
(?x)^(
versioneer\.py|
kanren/_version\.py|
doc/.*|
bin/.*
)$
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
# Flags leftover `breakpoint()`/`pdb` debug statements.
# NOTE(review): the trailing `|` before `)$` leaves an empty alternation,
# so this verbose regex also matches the empty string -- harmless for
# non-empty paths, but it looks like a leftover; confirm intent.
- id: debug-statements
exclude: |
(?x)^(
kanren/core\.py|
)$
- id: check-merge-conflict
# Code formatting.
- repo: https://github.com/psf/black
rev: 22.12.0
hooks:
- id: black
language_version: python3
# Linting; E721/E712/E501 are ignored (type-comparison, ==True/False
# comparisons, and line length, which black already manages).
- repo: https://github.com/pycqa/flake8
rev: 7.0.0
hooks:
- id: flake8
args: ['--ignore=E721,E712,E501']
# Import ordering.
- repo: https://github.com/pycqa/isort
rev: 5.13.2
hooks:
- id: isort
# Removes unused imports and variables; __init__.py files are exempt since
# their imports define the public API. (Same trailing-`|` empty-alternation
# note as above applies to this exclude regex.)
- repo: https://github.com/PyCQA/autoflake
rev: v2.3.0
hooks:
- id: autoflake
exclude: |
(?x)^(
.*/?__init__\.py|
)$
args: ['--in-place', '--remove-all-unused-imports', '--remove-unused-variables']
# Static type checking.
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.991
hooks:
- id: mypy
additional_dependencies:
- numpy>=1.20
- types-filelock
- types-setuptools
================================================
FILE: .pylintrc
================================================
[MASTER]
# Use multiple processes to speed up Pylint.
jobs=0
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# Allow optimization of some AST trees. This will activate a peephole AST
# optimizer, which will apply various small optimizations. For instance, it can
# be used to obtain the result of joining multiple strings with the addition
# operator. Joining a lot of strings can lead to a maximum recursion error in
# Pylint and this flag can prevent that. It has one side effect, the resulting
# AST will be different than the one from reality.
optimize-ast=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=all
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time. See also the "--disable" option for examples.
enable=import-error,
import-self,
reimported,
wildcard-import,
misplaced-future,
relative-import,
deprecated-module,
unpacking-non-sequence,
invalid-all-object,
undefined-all-variable,
used-before-assignment,
cell-var-from-loop,
global-variable-undefined,
dangerous-default-value,
# redefined-builtin,
redefine-in-handler,
unused-import,
unused-wildcard-import,
global-variable-not-assigned,
undefined-loop-variable,
global-at-module-level,
bad-open-mode,
redundant-unittest-assert,
boolean-datetime,
# unused-variable
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=parseable
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no
# Tells whether to display a full report or only the messages
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
[BASIC]
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,input
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Include a hint for the correct naming format with invalid-name
include-naming-hint=yes
# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$
# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
[ELIF]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=100
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[SPELLING]
# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=tensorflow.core.framework,tensorflow.python.framework,tensorflow.python.ops.gen_linalg_ops
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This supports can work
# with qualified names.
ignored-classes=
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_$|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make
[DESIGN]
# Maximum number of arguments for function / method
max-args=5
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=optparse
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
================================================
FILE: LICENSE.txt
================================================
Copyright (c) 2019 Brandon T. Willard
Copyright (c) 2012 Matthew Rocklin
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of kanren nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
================================================
FILE: MANIFEST.in
================================================
include LICENSE.txt
include pyproject.toml
include README.md
include setup.cfg
graft kanren
prune .github
prune doc
prune examples
prune tests
prune *egg-info
prune *_cache
exclude .gitattributes
exclude .gitignore
exclude .pre-commit-config.yaml
exclude .pylintrc
exclude Makefile
exclude pytest.ini
exclude release-notes
exclude requirements.txt
exclude tox.ini
exclude *venv*
global-exclude *.pyc
global-exclude .DS_Store
global-exclude __pycache__
================================================
FILE: Makefile
================================================
# Developer task runner for kanren. Targets with an inline `# ...` description
# on the target line are listed by `make help`; standalone comments (like the
# ones added below) are deliberately NOT picked up by the help grep.
# Fix: `build-distribution` was missing from .PHONY, so a file or directory
# named "build-distribution" would silently prevent builds from running.
.PHONY: help venv conda docker docstyle format style black test lint check coverage pypi build-distribution
.DEFAULT_GOAL = help
PYTHON = python3
PIP = pip
CONDA = conda
SHELL = bash
help:
@printf "Usage:\n"
@grep -E '^[a-zA-Z_-]+:.*?# .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?# "}; {printf "\033[1;34mmake %-10s\033[0m%s\n", $$1, $$2}'
# NOTE(review): `conda activate` inside a plain subshell usually requires the
# conda shell hook to be sourced first, and requirements-dev.txt is not present
# in the repository listing -- verify this target still works.
conda: # Set up a conda environment for development.
@printf "Creating conda environment...\n"
${CONDA} create --yes --name kanren-env python=3.6
( \
${CONDA} activate kanren-env; \
${PIP} install -U pip; \
${PIP} install -r requirements.txt; \
${PIP} install -r requirements-dev.txt; \
${CONDA} deactivate; \
)
@printf "\n\nConda environment created! \033[1;34mRun \`conda activate kanren-env\` to activate it.\033[0m\n\n\n"
venv: # Set up a Python virtual environment for development.
@printf "Creating Python virtual environment...\n"
rm -rf kanren-venv
${PYTHON} -m venv kanren-venv
( \
source kanren-venv/bin/activate; \
${PIP} install -U pip; \
${PIP} install -r requirements.txt; \
deactivate; \
)
@printf "\n\nVirtual environment created! \033[1;34mRun \`source kanren-venv/bin/activate\` to activate it.\033[0m\n\n\n"
docker: # Set up a Docker image for development.
@printf "Creating Docker image...\n"
${SHELL} ./scripts/container.sh --build
# Check docstring conventions with pydocstyle (part of `make lint`).
docstyle:
@printf "Checking documentation with pydocstyle...\n"
pydocstyle kanren/
@printf "\033[1;34mPydocstyle passes!\033[0m\n\n"
# Verify formatting with black --check (non-mutating; see `make black` to fix).
format:
@printf "Checking code style with black...\n"
black --check kanren/ tests/
@printf "\033[1;34mBlack passes!\033[0m\n\n"
# Lint with pylint using the settings in .pylintrc (part of `make lint`).
style:
@printf "Checking code style with pylint...\n"
pylint kanren/ tests/
@printf "\033[1;34mPylint passes!\033[0m\n\n"
black: # Format code in-place using black.
black kanren/ tests/
test: # Test code using pytest.
pytest -v tests/ kanren/ --cov=kanren/ --cov-report=xml --html=testing-report.html --self-contained-html
# Run the tests, then fail unless the diff against main is fully covered.
coverage: test
diff-cover coverage.xml --compare-branch=main --fail-under=100
# Build sdist and wheel into dist/ using an isolated .venv.
build-distribution:
${PYTHON} -m venv .venv
./.venv/bin/pip install --upgrade pip
./.venv/bin/pip install build
./.venv/bin/python -m build .
@echo "Built packages are in dist/"
# Upload built packages to PyPI with twine.
# NOTE(review): this recreates .venv even though build-distribution just built
# it -- redundant but harmless; consider dropping the first three lines.
pypi: build-distribution
${PYTHON} -m venv .venv
./.venv/bin/pip install --upgrade pip
./.venv/bin/pip install twine
twine upload --skip-existing dist/*;
lint: docstyle format style # Lint code using pydocstyle, black and pylint.
check: lint test coverage # Both lint and test code. Runs `make lint` followed by `make test`.
================================================
FILE: README.md
================================================
# `kanren`
[](https://travis-ci.org/pythological/kanren) [](https://coveralls.io/github/pythological/kanren?branch=main) [](https://pypi.org/project/miniKanren/) [](https://gitter.im/pythological/kanren?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Logic/relational programming in Python with [miniKanren](http://minikanren.org/).
## Installation
Using `pip`:
```bash
pip install miniKanren
```
Using `conda`:
```bash
conda install -c conda-forge miniKanren
```
## Development
First obtain the project source:
```bash
git clone git@github.com:pythological/kanren.git
cd kanren
```
Install the development dependencies:
```bash
$ pip install -r requirements.txt
```
Set up `pre-commit` hooks:
```bash
$ pre-commit install --install-hooks
```
Tests can be run with the provided `Makefile`:
```bash
make check
```
## Motivation
Logic programming is a general programming paradigm. This implementation however came about specifically to serve as an algorithmic core for Computer Algebra Systems in Python and for the automated generation and optimization of numeric software. Domain specific languages, code generation, and compilers have recently been a hot topic in the Scientific Python community. `kanren` aims to be a low-level core for these projects.
These points—along with `kanren` examples—are covered in the paper ["miniKanren as a Tool for Symbolic Computation in Python"](https://arxiv.org/abs/2005.11644).
## Examples
`kanren` enables one to express sophisticated relations—in the form of *goals*—and generate values that satisfy the relations. The following code is the "Hello, world!" of logic programming; it asks for values of the *logic variable* `x` such that `x == 5`:
```python
>>> from kanren import run, eq, membero, var, lall
>>> x = var()
>>> run(1, x, eq(x, 5))
(5,)
```
Multiple logic variables and goals can be used simultaneously. The following code asks for one list containing the values of `x` and `z` such that `x == z` **and** `z == 3`:
```python
>>> z = var()
>>> run(1, [x, z], eq(x, z),
eq(z, 3))
([3, 3],)
```
`kanren` uses [unification](http://en.wikipedia.org/wiki/Unification_%28computer_science%29) to match forms within expression trees. The following code asks for values of `x` such that `(1, 2) == (1, x)`:
```python
>>> run(1, x, eq((1, 2), (1, x)))
(2,)
```
The above examples use `eq`: a *goal constructor* that creates a goal for unification between two objects. Other goal constructors, such as `membero(item, coll)`, express more sophisticated relations and are often constructed from simpler ones like `eq`. More specifically, `membero` states that `item` is a member of the collection `coll`.
The following example uses `membero` to ask for *all* values of `x`, such that `x` is a member of `(1, 2, 3)` **and** `x` is a member of `(2, 3, 4)`.
```python
>>> run(0, x, membero(x, (1, 2, 3)), # x is a member of (1, 2, 3)
membero(x, (2, 3, 4))) # x is a member of (2, 3, 4)
(2, 3)
```
The examples above made implicit use of the goal constructors `lall` and `lany`, which represent goal *conjunction* and *disjunction*, respectively. Many useful relations can be expressed with `lall`, `lany`, and `eq` alone, but in `kanren` it's also easy to leverage the host language and explicitly create any relation expressible in Python.
### Representing Knowledge
`kanren` stores data as facts that state relationships between terms. The following code creates a parent relationship and uses it to state facts about who is a parent of whom within the Simpsons family:
```python
>>> from kanren import Relation, facts
>>> parent = Relation()
>>> facts(parent, ("Homer", "Bart"),
... ("Homer", "Lisa"),
... ("Abe", "Homer"))
>>> run(1, x, parent(x, "Bart"))
('Homer',)
>>> run(2, x, parent("Homer", x))
('Lisa', 'Bart')
```
We can use intermediate variables for more complex queries. For instance, who is Bart's grandfather?
```python
>>> grandparent_lv, parent_lv = var(), var()
>>> run(1, grandparent_lv, parent(grandparent_lv, parent_lv),
parent(parent_lv, 'Bart'))
('Abe',)
```
We can express the grandfather relationship as a distinct relation by creating a goal constructor:
```python
>>> def grandparent(x, z):
... y = var()
... return lall(parent(x, y), parent(y, z))
>>> run(1, x, grandparent(x, 'Bart'))
('Abe',)
```
## Constraints
`kanren` provides a fully functional constraint system that allows one to restrict unification and object types:
```python
>>> from kanren.constraints import neq, isinstanceo
>>> run(0, x,
... neq(x, 1), # Not "equal" to 1
... neq(x, 3), # Not "equal" to 3
... membero(x, (1, 2, 3)))
(2,)
>>> from numbers import Integral
>>> run(0, x,
... isinstanceo(x, Integral), # `x` must be of type `Integral`
... membero(x, (1.1, 2, 3.2, 4)))
(2, 4)
```
## Graph Relations
`kanren` comes with support for relational graph operations suitable for basic symbolic algebra operations. See the examples in [`doc/graphs.md`](doc/graphs.md).
## Extending `kanren`
`kanren` uses the [`logical-unification` library](https://github.com/pythological/unification) to support pattern matching on user defined types. Essentially, types that can be unified can be used with most `kanren` goals. See the [`logical-unification` project's examples](https://github.com/pythological/unification#examples) for demonstrations of how arbitrary types can be made unifiable.
## About
This project is a fork of [`logpy`](https://github.com/logpy/logpy).
## References
* [Logic Programming on wikipedia](http://en.wikipedia.org/wiki/Logic_programming)
* [miniKanren](http://minikanren.org/), a Scheme library for relational programming on which this library is based. More information can be found in the
[thesis of William
Byrd](https://scholarworks.iu.edu/dspace/bitstream/handle/2022/8777/Byrd_indiana_0093A_10344.pdf).
================================================
FILE: doc/basic.md
================================================
# Basics of `miniKanren`
The design of `miniKanren` is simple. It orchestrates only a few basic operations and yields a lot!
## Terms
Terms can be
- any Python object (e.g. `1`, `[1, 2]`, `object()`, etc.),
- logical variables constructed with `var`—denoted here by a tilde prefix (e.g. `~x`),
- or combinations of the two (e.g. `(1, ~x, 'cat')`)
In short, they are trees in which leaves may be either constants or variables. Constants may be of any Python type.
## Unification
We *unify* two similar terms like `(1, 2)` and `(1, ~x)` to form a *substitution* `{~x: 2}`. We say that `(1, 2)` and `(1, ~x)` unify under the substitution `{~x: 2}`. Variables may assume the value of any term.
`unify` is a function, provided by the [`logical-unification`](https://github.com/pythological/unification) library, that takes two terms, `u` and `v`, and returns a substitution `s`.
Examples that unify
| u | v | s |
|:-----------------:|:-----------------:|:-----------------:|
| 123 | 123 | {} |
| 'cat' | 'cat' | {} |
| (1, 2) | (1, 2) | {} |
| ~x | 1 | {~x: 1} |
| 1 | ~x | {~x: 1} |
| (1, ~x) | (1, 2) | {~x: 2} |
| (1, 1) | (~x, ~x) | {~x: 1} |
| (1, 2, ~x) | (~y, 2, 3) | {~x: 3, ~y: 1} |
Examples that don't unify
| u | v |
|:-----------------:|:-----------------:|
| 123 | 'cat' |
| (1, 2) | 12 |
| (1, ~x) | (2, 2) |
| (1, 2) | (~x, ~x) |
Actually we lied, `unify` also takes a substitution as input. This allows us to keep some history around. For example:
```python
>>> unify((1, 2), (1, x), {}) # normal case
{~x: 2}
>>> unify((1, 2), (1, x), {x: 2})  # x is already two. This is consistent
{~x: 2}
>>> unify((1, 2), (1, x), {x: 3}) # x is already three. This conflicts
False
```
## Reification
Reification is the opposite of unification. `reify` transforms a term with logic variables like `(1, ~x)` and a substitution like `{~x: 2}` into a term without logic variables like `(1, 2)`.
```python
>>> reify((1, x), {x: 2})
(1, 2)
```
## Goals and Goal Constructors
A *goal* is a function from one substitution to a stream of substitutions.
```
goal :: substitution -> [substitutions]
```
We make goals with *goal constructors*. Goal constructors are the normal building blocks of a logical program. Let's look at the goal constructor `membero`, which states that the first input must be a member of the second input (a collection).
```
goal = membero(x, (1, 2, 3))
```
We can feed this goal a substitution and it will give us a stream of substitutions. Here we'll feed it the substitution with no information and it will tell us that either `x` can be `1` or `x` can be `2` or `x` can be `3`
```python
>>> for s in goal({}):
...     print(s)
{~x: 1}
{~x: 2}
{~x: 3}
```
What if we already know that `x` is `2`?
```python
>>> for s in goal({x: 2}):
...     print(s)
{~x: 2}
```
Remember *goals* are functions from one substitution to a stream of substitutions. Users usually make goals with *goal constructors* like `eq`, or `membero`.
### Stream Functions
After this point `miniKanren` is just a library to manage streams of substitutions.
For example if we know both that `membero(x, (1, 2, 3))` and `membero(x, (2, 3, 4))` then we could do something like the following:
```python
>>> g1 = membero(x, (1, 2, 3))
>>> g2 = membero(x, (2, 3, 4))
>>> for s in g1({}):
... for ss in g2(s):
...         print(ss)
{~x: 2}
{~x: 3}
```
Logic programs can have many goals in complex hierarchies. Writing explicit for loops would quickly become tedious. Instead, `miniKanren` provides functions that perform logic-like operations on goal streams.
```
combinator :: [goals] -> goal
```
Two important stream functions are logical all `lall` and logical any `lany`.
```python
>>> g = lall(g1, g2)
>>> for s in g({}):
...     print(s)
{~x: 2}
{~x: 3}
>>> g = lany(g1, g2)
>>> for s in g({}):
...     print(s)
{~x: 1}
{~x: 2}
{~x: 3}
{~x: 4}
```
### Laziness
Goals produce a stream of substitutions. This stream is computed lazily, returning values only as they are needed. `miniKanren` depends on standard Python generators to maintain the necessary state and control flow.
```python
>>> stream = g({})
>>> stream
<generator object unique at 0x2e13690>
>>> next(stream)
{~x: 1}
```
## User Interface
Traditionally programs are run with the `run` function
```python
>>> x = var()
>>> run(0, x, membero(x, (1, 2, 3)), membero(x, (2, 3, 4)))
(2, 3)
```
`run` has an implicit `lall` for the goals at the end of the call. It `reifies` results when it returns so that the user never has to touch logic variables or substitutions.
## Conclusion
These are all the fundamental concepts that exist in `miniKanren`. To summarize:
- *Term*: a Python object, logic variable, or combination of the two
- *Substitution Map*: a dictionary mapping logic variables to terms
- *Unification*: A function that finds logic variable substitutions that make two terms equal
- *Reification*: A function that substitutes logic variables in a term with values given by a substitution map
- *Goal*: A generator function that takes a substitution and yields a stream of substitutions
- *Goal Constructor*: A user-level function that constructs and returns a goal
================================================
FILE: doc/graphs.md
================================================
# Relational Graph Manipulation
In this document, we show how `kanren` can be used to perform symbolic algebra operations *relationally*.
## Setup
First, we import the necessary modules and create a helper function for pretty printing the algebraic expressions.
```python
from math import log, exp
from numbers import Real
from functools import partial
from operator import add, mul
from unification import var
from etuples.core import etuple, ExpressionTuple
from kanren import run, eq, conde, lall
from kanren.core import success
from kanren.graph import walko, reduceo
from kanren.constraints import isinstanceo
# Just some nice formatting
def etuple_str(self):
if len(self) > 0:
return f"{getattr(self[0], '__name__', self[0])}({', '.join(map(str, self[1:]))})"
else:
return 'noop'
ExpressionTuple.__str__ = etuple_str
del ExpressionTuple._repr_pretty_
```
Next, we create a simple goal constructor that implements the algebraic relations `x + x == 2 * x` and `log(exp(x)) == x` and
constrains the input types to real numbers and expression tuples from the [`etuples`](https://github.com/pythological/etuples) package.
```python
def single_math_reduceo(expanded_term, reduced_term):
"""Construct a goal for some simple math reductions."""
# Create a logic variable to represent our variable term "x"
x_lv = var()
# `conde` is a relational version of Lisp's `cond`/if-else; here, each
# "branch" pairs the right- and left-hand sides of a replacement rule with
# the corresponding inputs.
return lall(
isinstanceo(x_lv, Real),
isinstanceo(x_lv, ExpressionTuple),
conde(
# add(x, x) == mul(2, x)
[eq(expanded_term, etuple(add, x_lv, x_lv)),
eq(reduced_term, etuple(mul, 2, x_lv))],
# log(exp(x)) == x
[eq(expanded_term, etuple(log, etuple(exp, x_lv))),
eq(reduced_term, x_lv)]),
)
```
In order to obtain "fully reduced" results, we need to turn `math_reduceo` into a fixed-point-producing relation (i.e. recursive).
```python
math_reduceo = partial(reduceo, single_math_reduceo)
```
We also need a relation that walks term graphs specifically (i.e. graphs composed of operator and operand combinations) and necessarily produces its output in the form of expression tuples.
```python
term_walko = partial(walko, rator_goal=eq, null_type=ExpressionTuple)
```
## Reductions
The following example is a straight-forward reduction—i.e. left-to-right applications of the relations in `math_reduceo`—of the term `add(etuple(add, 3, 3), exp(log(exp(5))))`. This is the direction in which results are normally computed in symbolic algebra libraries.
```python
# This is the term we want to reduce
expanded_term = etuple(add, etuple(add, 3, 3), etuple(exp, etuple(log, etuple(exp, 5))))
# Create a logic variable to represent the results we want to compute
reduced_term = var()
# Ask for at most 3 results (passing 0 to `run` would return all results)
res = run(3, reduced_term, term_walko(math_reduceo, expanded_term, reduced_term))
```
```python
>>> print('\n'.join((f'{expanded_term} == {r}' for r in res)))
add(add(3, 3), exp(log(exp(5)))) == add(mul(2, 3), exp(5))
add(add(3, 3), exp(log(exp(5)))) == add(add(3, 3), exp(5))
add(add(3, 3), exp(log(exp(5)))) == add(mul(2, 3), exp(log(exp(5))))
```
## Expansions
In this example, we're specifying a grounded reduced term (i.e. `mul(2, 5)`) and an unground expanded term (i.e. the logic variable `q_lv`). We're essentially asking for *graphs that would reduce to `mul(2, 5)`*. Naturally, there are infinitely many graphs that reduce to `mul(2, 5)`, so we're only going to ask for ten of them; nevertheless, miniKanren is inherently capable of handling infinitely many results through its use of lazily evaluated goal streams.
```python
expanded_term = var()
reduced_term = etuple(mul, 2, 5)
# Ask for 10 results of `q_lv`
res = run(10, expanded_term, term_walko(math_reduceo, expanded_term, reduced_term))
```
```python
>>> rjust = max(map(lambda x: len(str(x)), res))
>>> print('\n'.join((f'{str(r):>{rjust}} == {reduced_term}' for r in res)))
add(5, 5) == mul(2, 5)
mul(log(exp(2)), log(exp(5))) == mul(2, 5)
log(exp(add(5, 5))) == mul(2, 5)
mul(2, log(exp(5))) == mul(2, 5)
log(exp(log(exp(add(5, 5))))) == mul(2, 5)
mul(log(exp(log(exp(2)))), log(exp(5))) == mul(2, 5)
log(exp(log(exp(log(exp(add(5, 5))))))) == mul(2, 5)
mul(2, log(exp(log(exp(5))))) == mul(2, 5)
log(exp(log(exp(log(exp(log(exp(add(5, 5))))))))) == mul(2, 5)
mul(log(exp(log(exp(log(exp(2)))))), log(exp(5))) == mul(2, 5)
```
## Expansions _and_ Reductions
Now, we set **both** term graphs to unground logic variables.
```python
expanded_term = var()
reduced_term = var()
res = run(10, [expanded_term, reduced_term],
term_walko(math_reduceo, expanded_term, reduced_term))
```
```python
>>> rjust = max(map(lambda x: len(str(x[0])), res))
>>> print('\n'.join((f'{str(e):>{rjust}} == {str(r)}' for e, r in res)))
add(~_2291, ~_2291) == mul(2, ~_2291)
~_2288() == ~_2288()
log(exp(add(~_2297, ~_2297))) == mul(2, ~_2297)
~_2288(add(~_2303, ~_2303)) == ~_2288(mul(2, ~_2303))
log(exp(log(exp(add(~_2309, ~_2309))))) == mul(2, ~_2309)
~_2288(~_2294) == ~_2288(~_2294)
log(exp(log(exp(log(exp(add(~_2315, ~_2315))))))) == mul(2, ~_2315)
~_2288(~_2300()) == ~_2288(~_2300())
log(exp(log(exp(log(exp(log(exp(add(~_2325, ~_2325))))))))) == mul(2, ~_2325)
~_2288(~_2294, add(~_2331, ~_2331)) == ~_2288(~_2294, mul(2, ~_2331))
```
The symbols prefixed by `~` are the string form of logic variables, so a result like `add(~_2291, ~_2291)` essentially means `add(x, x)` for some variable `x`. In this instance, miniKanren has used our algebraic relations in `math_reduceo` to produce more relations—even some with variable operators with multiple arities!
With additional goals, we can narrow-in on very specific types of expressions. In the following, we state that `expanded_term` must be the [`cons`](https://github.com/pythological/python-cons) of a `log` and logic variable (i.e. anything else). In other words, we're stating that the operator of `expanded_term` must be a `log`, or that we want all expressions expanding to a `log`.
```python
from kanren.goals import conso
res = run(10, [expanded_term, reduced_term],
conso(log, var(), expanded_term),
term_walko(math_reduceo, expanded_term, reduced_term))
```
```python
>>> rjust = max(map(lambda x: len(str(x[0])), res))
>>> print('\n'.join((f'{str(e):>{rjust}} == {str(r)}' for e, r in res)))
log(exp(add(~_2344, ~_2344))) == mul(2, ~_2344)
log() == log()
log(exp(~reduced_2285)) == ~reduced_2285
log(add(~_2354, ~_2354)) == log(mul(2, ~_2354))
log(exp(log(exp(add(~_2360, ~_2360))))) == mul(2, ~_2360)
log(~_2347) == log(~_2347)
log(exp(log(exp(log(exp(add(~_2366, ~_2366))))))) == mul(2, ~_2366)
log(~_2351()) == log(~_2351())
log(exp(log(exp(log(exp(log(exp(add(~_2376, ~_2376))))))))) == mul(2, ~_2376)
log(~_2347, add(~_2382, ~_2382)) == log(~_2347, mul(2, ~_2382))
```
The output contains a nullary `log` function, which isn't a valid expression. We can restrict this type of output by further stating that the `log` expression's `cdr` term is itself the result of a `cons` and, thus, not an empty sequence.
```python
exp_term_cdr = var()
res = run(10, [expanded_term, reduced_term],
conso(log, exp_term_cdr, expanded_term),
conso(var(), var(), exp_term_cdr),
term_walko(math_reduceo, expanded_term, reduced_term))
```
```python
>>> rjust = max(map(lambda x: len(str(x[0])), res))
>>> print('\n'.join((f'{str(e):>{rjust}} == {str(r)}' for e, r in res)))
log(exp(add(~_2457, ~_2457))) == mul(2, ~_2457)
log(add(~_2467, ~_2467)) == log(mul(2, ~_2467))
log(exp(~_2446)) == ~_2446
log(~_2460) == log(~_2460)
log(exp(log(exp(add(~_2477, ~_2477))))) == mul(2, ~_2477)
log(~_2464()) == log(~_2464())
log(exp(log(exp(log(exp(add(~_2487, ~_2487))))))) == mul(2, ~_2487)
log(~_2460, add(~_2493, ~_2493)) == log(~_2460, mul(2, ~_2493))
log(exp(log(exp(log(exp(log(exp(add(~_2499, ~_2499))))))))) == mul(2, ~_2499)
log(log(exp(add(~_2501, ~_2501)))) == log(mul(2, ~_2501))
```
================================================
FILE: examples/__init__.py
================================================
================================================
FILE: examples/account.py
================================================
class Account(object):
    """A simple bank-account record: holder name, numeric id, and balance."""

    def __init__(self, first, last, id, balance):
        self.first = first
        self.last = last
        self.id = id
        self.balance = balance

    def info(self):
        """Return this account's fields as a tuple (first, last, id, balance)."""
        return (self.first, self.last, self.id, self.balance)

    def __eq__(self, other):
        # Equal only to another Account whose fields all match.
        return isinstance(other, type(self)) and self.info() == other.info()

    def __hash__(self):
        # Hash on the type plus the field tuple so equal accounts hash alike.
        return hash((type(self), self.info()))

    def __str__(self):
        return "Account: %s %s, id %d, balance %d" % self.info()

    __repr__ = __str__
================================================
FILE: examples/commutative.py
================================================
"""Demonstrate associative/commutative unification of symbolic expressions."""
from kanren import fact, run, var
from kanren.assoccomm import associative, commutative
from kanren.assoccomm import eq_assoccomm as eq

# Define some dummy operations
add = "add"
mul = "mul"

# Declare that these ops are commutative using the facts system
fact(commutative, mul)
fact(commutative, add)
fact(associative, mul)
fact(associative, add)

# Define some logic variables
x, y = var(), var()

# Two expressions to match
pattern = (mul, (add, 1, x), y)  # (1 + x) * y
expr = (mul, 2, (add, 3, 1))  # 2 * (3 + 1)

res = run(0, (x, y), eq(pattern, expr))
print(res)
# prints ((3, 2),) meaning
#    x matches to 3
#    y matches to 2
================================================
FILE: examples/corleone.py
================================================
"""
Family relationships from The Godfather Translated from the core.logic example
found in "The Magical Island of Kanren - core.logic Intro Part 1"
http://objectcommando.com/blog/2011/11/04/the-magical-island-of-kanren-core-logic-intro-part-1/
"""
import toolz
from kanren import Relation, conde, facts, run, var
father = Relation()
mother = Relation()
facts(
father,
("Vito", "Michael"),
("Vito", "Sonny"),
("Vito", "Fredo"),
("Michael", "Anthony"),
("Michael", "Mary"),
("Sonny", "Vicent"),
("Sonny", "Francesca"),
("Sonny", "Kathryn"),
("Sonny", "Frank"),
("Sonny", "Santino"),
)
facts(
mother,
("Carmela", "Michael"),
("Carmela", "Sonny"),
("Carmela", "Fredo"),
("Kay", "Mary"),
("Kay", "Anthony"),
("Sandra", "Francesca"),
("Sandra", "Kathryn"),
("Sandra", "Frank"),
("Sandra", "Santino"),
)
q = var()
print((run(0, q, father("Vito", q)))) # Vito is the father of who?
# ('Sonny', 'Michael', 'Fredo')
print((run(0, q, father(q, "Michael")))) # Who is the father of Michael?
# ('Vito',)
def parent(p, child):
return conde([father(p, child)], [mother(p, child)])
print((run(0, q, parent(q, "Michael")))) # Who is a parent of Michael?
# ('Vito', 'Carmela')
def grandparent(gparent, child):
p = var()
return conde((parent(gparent, p), parent(p, child)))
print(run(0, q, grandparent(q, "Anthony"))) # Who is a grandparent of Anthony?
# ('Vito', 'Carmela')
print(run(0, q, grandparent("Vito", q))) # Vito is a grandparent of whom?
# ('Vicent', 'Anthony', 'Kathryn', 'Mary', 'Frank', 'Santino', 'Francesca')
def sibling(a, b):
p = var()
return conde((parent(p, a), parent(p, b)))
# All spouses
x, y, z = var(), var(), var()
print(run(0, (x, y), father(x, z), mother(y, z), results_filter=toolz.unique))
# (('Sonny', 'Sandra'), ('Vito', 'Carmela'), ('Michael', 'Kay'))
================================================
FILE: examples/data/adjacent-states.txt
================================================
# Author Gregg Lind
# License: Public Domain. I would love to hear about any projects you use
# if it for though!
# http://writeonly.wordpress.com/2009/03/20/adjacency-list-of-states-of-the-united-states-us/
AK
AL,MS,TN,GA,FL
AR,MO,TN,MS,LA,TX,OK
AZ,CA,NV,UT,CO,NM
CA,OR,NV,AZ
CO,WY,NE,KS,OK,NM,AZ,UT
CT,NY,MA,RI
DC,MD,VA
DE,MD,PA,NJ
FL,AL,GA
GA,FL,AL,TN,NC,SC
HI
IA,MN,WI,IL,MO,NE,SD
ID,MT,WY,UT,NV,OR,WA
IL,IN,KY,MO,IA,WI
IN,MI,OH,KY,IL
KS,NE,MO,OK,CO
KY,IN,OH,WV,VA,TN,MO,IL
LA,TX,AR,MS
MA,RI,CT,NY,NH,VT
MD,VA,WV,PA,DC,DE
ME,NH
MI,WI,IN,OH
MN,WI,IA,SD,ND
MO,IA,IL,KY,TN,AR,OK,KS,NE
MS,LA,AR,TN,AL
MT,ND,SD,WY,ID
NC,VA,TN,GA,SC
ND,MN,SD,MT
NE,SD,IA,MO,KS,CO,WY
NH,VT,ME,MA
NJ,DE,PA,NY
NM,AZ,UT,CO,OK,TX
NV,ID,UT,AZ,CA,OR
NY,NJ,PA,VT,MA,CT
OH,PA,WV,KY,IN,MI
OK,KS,MO,AR,TX,NM,CO
OR,CA,NV,ID,WA
PA,NY,NJ,DE,MD,WV,OH
RI,CT,MA
SC,GA,NC
SD,ND,MN,IA,NE,WY,MT
TN,KY,VA,NC,GA,AL,MS,AR,MO
TX,NM,OK,AR,LA
UT,ID,WY,CO,NM,AZ,NV
VA,NC,TN,KY,WV,MD,DC
VT,NY,NH,MA
WA,ID,OR
WI,MI,MN,IA,IL
WV,OH,PA,MD,VA,KY
WY,MT,SD,NE,CO,UT,ID
================================================
FILE: examples/data/coastal-states.txt
================================================
WA,OR,CA,TX,LA,MI,AL,GA,FL,SC,NC,VI,MD,DW,NJ,NY,CT,RI,MA,MN,NH
================================================
FILE: examples/states.py
================================================
"""
An example showing how to use facts and relations to store data and query data
This example builds a small database of the US states.
The `adjacency` relation expresses which states border each other.
The `coastal` relation expresses which states border the ocean.
"""
from kanren import Relation, fact, run, var
adjacent = Relation()
coastal = Relation()
coastal_states = (
"WA,OR,CA,TX,LA,MS,AL,GA,FL,SC,NC,VA,MD,DE,NJ,NY,CT,RI,MA,ME,NH,AK,HI".split(",")
)
# ['NY', 'NJ', 'CT', ...]
for state in coastal_states:
# E.g. 'NY' is coastal
fact(coastal, state)
# Lines like 'CA,OR,NV,AZ'
with open("examples/data/adjacent-states.txt") as f:
adjlist = [line.strip().split(",") for line in f if line and line[0].isalpha()]
# ['CA', 'OR', 'NV', 'AZ']
for L in adjlist:
# 'CA', ['OR', 'NV', 'AZ']
head, tail = L[0], L[1:]
for state in tail:
# E.g. 'CA' is adjacent to 'OR', 'CA' is adjacent to 'NV', etc.
fact(adjacent, head, state)
x = var()
y = var()
# Is California adjacent to New York?
print(run(0, x, adjacent("CA", "NY")))
# ()
# All states next to California
print(run(0, x, adjacent("CA", x)))
# ('AZ', 'OR', 'NV')
# All coastal states next to Texas
print(run(0, x, adjacent("TX", x), coastal(x)))
# ('LA',)
# Five states that border a coastal state
print(run(5, x, coastal(y), adjacent(x, y)))
# ('LA', 'NM', 'OK', 'AR', 'RI')
# All states adjacent to Tennessee and adjacent to Florida
print(run(0, x, adjacent("TN", x), adjacent("FL", x)))
# ('AL', 'GA')
================================================
FILE: examples/user_classes.py
================================================
"""Demonstrate relational queries over a user-defined (unifiable) class."""
from operator import add, gt, sub

from examples.account import Account
from kanren import eq, membero, run, unifiable, var
from kanren.core import lall
from kanren.term import applyo, term  # noqa: F401

unifiable(Account)  # Register Account class

accounts = (
    Account("Adam", "Smith", 1, 20),
    Account("Carl", "Marx", 2, 3),
    Account("John", "Rockefeller", 3, 1000),
)

# optional name strings are helpful for debugging
first = var(prefix="first")
last = var(prefix="last")
ident = var(prefix="ident")
balance = var(prefix="balance")
newbalance = var(prefix="newbalance")

# Describe a couple of transformations on accounts
source = Account(first, last, ident, balance)
target = Account(first, last, ident, newbalance)

theorists = ("Adam", "Carl")

# Give $10 to theorists
theorist_bonus = lall(
    membero(source, accounts),
    membero(first, theorists),
    # newbalance == add(10, balance)
    applyo(add, (10, balance), newbalance),
)

# Take $10 from anyone with more than $100
a = var(prefix="a")
tax_the_rich = lall(
    membero(source, accounts),
    # a == gt(balance, 100)
    applyo(gt, (balance, 100), a),
    eq(a, True),
    # newbalance == sub(balance, 10)
    applyo(sub, (balance, 10), newbalance),
)

print("Take $10 from anyone with more than $100")
print(run(0, target, tax_the_rich))

print("Give $10 to theorists")
print(run(0, target, theorist_bonus))
================================================
FILE: examples/zebra-puzzle.py
================================================
"""
Zebra puzzle as published in Life International in 1962.
https://en.wikipedia.org/wiki/Zebra_Puzzle
"""
from dataclasses import dataclass, field
from typing import Union
from unification import Var, unifiable, var, vars
from kanren import conde, eq, lall, membero, run
@unifiable
@dataclass
class House:
nationality: Union[str, Var] = field(default_factory=var)
drink: Union[str, Var] = field(default_factory=var)
animal: Union[str, Var] = field(default_factory=var)
cigarettes: Union[str, Var] = field(default_factory=var)
color: Union[str, Var] = field(default_factory=var)
def righto(right, left, houses):
"""Express that `right` is on the right of `left` among all the houses."""
neighbors = tuple(zip(houses[:-1], houses[1:]))
return membero((left, right), neighbors)
def nexto(a, b, houses):
"""Express that `a` and `b` are next to each other."""
return conde([righto(a, b, houses)], [righto(b, a, houses)])
# And now for the riddle
houses = vars(5)
goals = lall(
membero(House("Englishman", color="red"), houses),
membero(House("Spaniard", animal="dog"), houses),
membero(House(drink="coffee", color="green"), houses),
membero(House("Ukrainian", drink="tea"), houses),
righto(House(color="green"), House(color="ivory"), houses),
membero(House(animal="snails", cigarettes="Old Gold"), houses),
membero(House(color="yellow", cigarettes="Kools"), houses),
eq(House(drink="milk"), houses[2]),
eq(House("Norwegian"), houses[0]),
nexto(House(cigarettes="Chesterfields"), House(animal="fox"), houses),
nexto(House(cigarettes="Kools"), House(animal="horse"), houses),
membero(House(drink="orange juice", cigarettes="Lucky Strike"), houses),
membero(House("Japanese", cigarettes="Parliaments"), houses),
nexto(House("Norwegian"), House(color="blue"), houses),
membero(House(drink="water"), houses),
membero(House(animal="zebra"), houses),
)
results = run(0, houses, goals)
print(results)
# (
# [
# House(
# nationality="Norwegian",
# drink="water",
# animal="fox",
# cigarettes="Kools",
# color="yellow",
# ),
# House(
# nationality="Ukrainian",
# drink="tea",
# animal="horse",
# cigarettes="Chesterfields",
# color="blue",
# ),
# House(
# nationality="Englishman",
# drink="milk",
# animal="snails",
# cigarettes="Old Gold",
# color="red",
# ),
# House(
# nationality="Spaniard",
# drink="orange juice",
# animal="dog",
# cigarettes="Lucky Strike",
# color="ivory",
# ),
# House(
# nationality="Japanese",
# drink="coffee",
# animal="zebra",
# cigarettes="Parliaments",
# color="green",
# ),
# ],
# )
================================================
FILE: kanren/__init__.py
================================================
# flake8: noqa
"""kanren is a Python library for logic and relational programming."""
from importlib.metadata import version

from unification import Var, isvar, reify, unifiable, unify, var, variables, vars

from .core import conde, eq, lall, lany, run
from .facts import Relation, fact, facts
from .goals import (
    appendo,
    conso,
    heado,
    itero,
    membero,
    nullo,
    permuteo,
    permuteq,
    rembero,
    tailo,
)
from .term import arguments, operator, term, unifiable_with_term

# The package is distributed on PyPI under the name "miniKanren".
__version__ = version("miniKanren")
================================================
FILE: kanren/assoccomm.py
================================================
"""Functions for associative and commutative unification.
This module provides goals for associative and commutative unification. It
accomplishes this through naively trying all possibilities. This was built to
be used in the computer algebra systems SymPy and Theano.
>>> from kanren import run, var, fact
>>> from kanren.assoccomm import eq_assoccomm as eq
>>> from kanren.assoccomm import commutative, associative
>>> # Define some dummy Ops
>>> add = 'add'
>>> mul = 'mul'
>>> # Declare that these ops are commutative using the facts system
>>> fact(commutative, mul)
>>> fact(commutative, add)
>>> fact(associative, mul)
>>> fact(associative, add)
>>> # Define some wild variables
>>> x, y = var('x'), var('y')
>>> # Two expressions to match
>>> pattern = (mul, (add, 1, x), y) # (1 + x) * y
>>> expr = (mul, 2, (add, 3, 1)) # 2 * (3 + 1)
>>> print(run(0, (x,y), eq(pattern, expr)))
((3, 2),)
"""
from collections.abc import Sequence
from functools import partial
from operator import eq as equal
from operator import length_hint
from cons.core import ConsPair, car, cdr
from etuples import etuple
from toolz import sliding_window
from unification import reify, unify, var
from .core import conde, eq, ground_order, lall, succeed
from .facts import Relation
from .goals import itero, permuteo
from .graph import term_walko
from .term import term
associative = Relation("associative")
commutative = Relation("commutative")
def flatten_assoc_args(op_predicate, items):
    """Generate `items` with nested applications of a matching operator flattened out.

    Any cons-pair item whose head satisfies `op_predicate` is replaced by the
    recursively flattened items of its tail; every other item passes through
    unchanged.
    """
    for item in items:
        if not (isinstance(item, ConsPair) and op_predicate(car(item))):
            yield item
            continue

        tail = cdr(item)
        if length_hint(tail) > 0:
            yield from flatten_assoc_args(op_predicate, tail)
        else:
            # A nullary application of the operator has nothing to flatten.
            yield item
def assoc_args(rator, rands, n, ctor=None):
    """Produce all associative argument combinations of rator + rands in n-sized rand groupings.

    >>> from kanren.assoccomm import assoc_args
    >>> list(assoc_args('op', [1, 2, 3], 2))
    [[['op', 1, 2], 3], [1, ['op', 2, 3]]]
    """  # noqa: E501
    assert n > 0

    if ctor is None:
        ctor = type(rands)

    all_rands = list(rands)

    if len(all_rands) == n:
        # Grouping every argument reproduces the original arguments.
        yield ctor(rands)
        return

    for start, group in enumerate(sliding_window(n, all_rands)):
        # Replace the `n` arguments starting at `start` with a single
        # `rator` application over them.
        grouped_term = term(rator, ctor(group))
        yield ctor(all_rands[:start] + [grouped_term] + all_rands[start + n :])
def eq_assoc_args(
    op, a_args, b_args, n=None, inner_eq=eq, no_ident=False, null_type=etuple
):
    """Create a goal that applies associative unification to an operator and two sets of arguments.

    This is a non-relational utility goal.  It assumes that the op and at
    least one set of arguments are ground under the state in which it is
    evaluated.
    """  # noqa: E501
    u_args, v_args = var(), var()

    def eq_assoc_args_goal(S):
        nonlocal op, u_args, v_args, n

        (op_rf, u_args_rf, v_args_rf, n_rf) = reify((op, u_args, v_args, n), S)

        if isinstance(v_args_rf, Sequence):
            # Make sure that a ground argument sequence is in `u_args_rf`.
            u_args_rf, v_args_rf = v_args_rf, u_args_rf

        if isinstance(u_args_rf, Sequence) and isinstance(v_args_rf, Sequence):
            # Both argument sequences are ground.
            # TODO: We just ignore `n` when both are sequences?

            if type(u_args_rf) != type(v_args_rf):
                # Sequences of different container types never unify here.
                return

            if no_ident and unify(u_args_rf, v_args_rf, S) is not False:
                # The identity association is excluded and these arguments
                # already unify, so there's nothing to produce.
                return

            op_pred = partial(equal, op_rf)
            u_args_flat = type(u_args_rf)(flatten_assoc_args(op_pred, u_args_rf))
            v_args_flat = type(v_args_rf)(flatten_assoc_args(op_pred, v_args_rf))

            if len(u_args_flat) == len(v_args_flat):
                g = inner_eq(u_args_flat, v_args_flat)
            else:
                if len(u_args_flat) < len(v_args_flat):
                    sm_args, lg_args = u_args_flat, v_args_flat
                else:
                    sm_args, lg_args = v_args_flat, u_args_flat

                # Group the longer sequence so that it matches the length of
                # the shorter one, and try unifying against each grouping.
                grp_sizes = len(lg_args) - len(sm_args) + 1
                assoc_terms = assoc_args(
                    op_rf, lg_args, grp_sizes, ctor=type(u_args_rf)
                )

                g = conde([inner_eq(sm_args, a_args)] for a_args in assoc_terms)

            yield from g(S)

        elif isinstance(u_args_rf, Sequence):
            # Only one argument sequence is ground; enumerate associative
            # groupings of it and unify each with the unground side.
            # TODO: We really need to know the arity (ranges) for the operator
            # in order to make good choices here.
            # For instance, does `(op, 1, 2) == (op, (op, 1, 2))` make sense?
            # If so, the lower-bound on this range should actually be `1`.
            if len(u_args_rf) == 1:
                if not no_ident and (n_rf == 1 or n_rf is None):
                    g = inner_eq(u_args_rf, v_args_rf)
                else:
                    return
            else:
                u_args_flat = list(flatten_assoc_args(partial(equal, op_rf), u_args_rf))

                if n_rf is not None:
                    arg_sizes = [n_rf]
                else:
                    # Try all grouping sizes; include the full length (the
                    # identity grouping) only when `no_ident` is false.
                    arg_sizes = range(2, len(u_args_flat) + (not no_ident))

                v_ac_args = (
                    v_ac_arg
                    for n_i in arg_sizes
                    for v_ac_arg in assoc_args(
                        op_rf, u_args_flat, n_i, ctor=type(u_args_rf)
                    )
                    if not no_ident or v_ac_arg != u_args_rf
                )
                g = conde([inner_eq(v_args_rf, v_ac_arg)] for v_ac_arg in v_ac_args)

            yield from g(S)

    return lall(
        # Order the arguments so that a ground sequence (if any) ends up in
        # `u_args` first.
        ground_order((a_args, b_args), (u_args, v_args)),
        itero(u_args, nullo_refs=(v_args,), default_ConsNull=null_type),
        eq_assoc_args_goal,
    )
def eq_assoc(u, v, n=None, op_predicate=associative, null_type=etuple):
    """Create a goal for associative unification of two terms.

    >>> from kanren import run, var, fact
    >>> from kanren.assoccomm import eq_assoc as eq
    >>> from kanren.assoccomm import commutative, associative

    >>> fact(commutative, 'add')  # declare that 'add' is commutative
    >>> fact(associative, 'add')  # declare that 'add' is associative

    >>> x = var()
    >>> run(0, x, eq(('add', 1, 2, 3), ('add', 1, x)))
    (('add', 2, 3),)
    """

    def assoc_args_unique(a, b, op, **kwargs):
        # Associatively unify the argument sequences, excluding the
        # identity association.
        return eq_assoc_args(op, a, b, no_ident=True, null_type=null_type)

    return term_walko(op_predicate, assoc_args_unique, u, v, n=n)
def eq_comm(u, v, op_predicate=commutative, null_type=etuple):
    """Create a goal for commutative equality.

    >>> from kanren import run, var, fact
    >>> from kanren.assoccomm import eq_comm as eq
    >>> from kanren.assoccomm import commutative, associative

    >>> fact(commutative, 'add')  # declare that 'add' is commutative
    >>> fact(associative, 'add')  # declare that 'add' is associative

    >>> x = var()
    >>> run(0, x, eq(('add', 1, 2, 3), ('add', 2, x, 1)))
    (3,)
    """

    def unique_permuteo(lhs, rhs, op, **kwargs):
        # Permute the argument sequences, excluding the identity permutation.
        return permuteo(lhs, rhs, no_ident=True, default_ConsNull=null_type)

    return term_walko(op_predicate, unique_permuteo, u, v)
def assoc_flatten(a, a_flat):
    """Create a goal that unifies `a_flat` with `a` after flattening nested applications of `a`'s associative operator."""  # noqa: E501

    def assoc_flatten_goal(S):
        nonlocal a, a_flat

        a_rf = reify(a, S)

        # Only flatten when `a` reifies to a sequence whose operator has been
        # declared associative.
        if isinstance(a_rf, Sequence) and (a_rf[0],) in associative.facts:

            def op_pred(sub_op):
                nonlocal S
                # Only flatten sub-expressions that use the same operator.
                sub_op_rf = reify(sub_op, S)
                return sub_op_rf == a_rf[0]

            a_flat_rf = type(a_rf)(flatten_assoc_args(op_pred, a_rf))
        else:
            # Not a known associative expression; it's already "flat".
            a_flat_rf = a_rf

        yield from eq(a_flat, a_flat_rf)(S)

    return assoc_flatten_goal
def eq_assoccomm(u, v, null_type=etuple):
    """Construct a goal for associative and commutative unification.

    >>> from kanren.assoccomm import eq_assoccomm as eq
    >>> from kanren.assoccomm import commutative, associative
    >>> from kanren import fact, run, var

    >>> fact(commutative, 'add')  # declare that 'add' is commutative
    >>> fact(associative, 'add')  # declare that 'add' is associative

    >>> x = var()
    >>> e1 = ('add', 1, 2, 3)
    >>> e2 = ('add', 1, x)
    >>> run(0, x, eq(e1, e2))
    (('add', 3, 2), ('add', 2, 3))
    """
    # NOTE(review): `null_type` is accepted but not used in this body —
    # confirm whether it should be forwarded (cf. `eq_assoc`/`eq_comm`).

    def eq_assoccomm_step(a, b, op):
        # Unify `a` and `b` by first permuting `a`'s arguments (when `op` is
        # commutative) and then grouping them associatively (when `op` is
        # associative).
        z = var()
        return lall(
            # Permute
            conde(
                [
                    commutative(op),
                    permuteo(a, z, no_ident=True, default_ConsNull=etuple),
                ],
                [eq(a, z)],
            ),
            # Generate associative combinations
            conde(
                [associative(op), eq_assoc_args(op, z, b, no_ident=True)], [eq(z, b)]
            ),
        )

    return term_walko(
        lambda x: succeed,
        eq_assoccomm_step,
        u,
        v,
        format_step=assoc_flatten,
        no_ident=False,
    )
================================================
FILE: kanren/constraints.py
================================================
import weakref
from abc import ABC, abstractmethod
from collections import UserDict
from collections.abc import Mapping
from typing import Optional
from cons.core import ConsPair
from toolz import groupby
from unification import Var, reify, unify, var
from unification.core import _reify, isground
from unification.utils import transitive_get as walk
from .util import FlexibleSet
class ConstraintStore(ABC):
    """A class that enforces constraints between logic variables in a miniKanren state.

    Attributes
    ----------
    lvar_constraints: MutableMapping
        A mapping of logic variables to sets of objects that define their
        constraints (e.g. a set of items with which the logic variable cannot
        be unified). The mapping's values are entirely determined by the
        ConstraintStore implementation.

    """

    __slots__ = ("lvar_constraints",)

    # Operator symbol used when printing constraints; set by subclasses.
    op_str: Optional[str] = None

    def __init__(self, lvar_constraints=None):
        # self.lvar_constraints = weakref.WeakKeyDictionary(lvar_constraints)
        self.lvar_constraints = lvar_constraints or dict()

    @abstractmethod
    def pre_unify_check(self, lvar_map, lvar=None, value=None):
        """Check a key-value pair before they're added to a ConstrainedState."""
        raise NotImplementedError()

    @abstractmethod
    def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=None):
        """Check a key-value pair after they're added to a ConstrainedState.

        XXX: This method may alter the internal constraints, so make a copy!
        """
        raise NotImplementedError()

    def add(self, lvar, lvar_constraint, **kwargs):
        """Add a new constraint."""
        if lvar not in self.lvar_constraints:
            # First constraint for this variable: start a new set.
            self.lvar_constraints[lvar] = FlexibleSet([lvar_constraint])
        else:
            self.lvar_constraints[lvar].add(lvar_constraint)

    def constraints_str(self, lvar):
        """Print the constraints on a logic variable."""
        if lvar in self.lvar_constraints:
            return f"{self.op_str} {self.lvar_constraints[lvar]}"
        else:
            return ""

    def copy(self):
        """Return a copy of this store with per-variable constraint sets copied."""
        return type(self)(
            lvar_constraints={k: v.copy() for k, v in self.lvar_constraints.items()},
        )

    def __contains__(self, lvar):
        return lvar in self.lvar_constraints

    def __eq__(self, other):
        # Stores are equal when they're the same type with the same operator
        # string and identical constraint mappings.
        return (
            type(self) == type(other)
            and self.op_str == other.op_str
            and self.lvar_constraints == other.lvar_constraints
        )

    def __repr__(self):
        return f"ConstraintStore({self.op_str}: {self.lvar_constraints})"
class ConstrainedState(UserDict):
    """A miniKanren state that holds unifications of logic variables and upholds constraints on logic variables."""  # noqa: E501

    __slots__ = ("constraints",)

    def __init__(self, *s, constraints=None):
        super().__init__(*s)
        # A mapping from `ConstraintStore` subclasses (used as keys) to their
        # store instances.
        self.constraints = dict(constraints or [])

    def pre_unify_checks(self, lvar, value):
        """Check the constraints before unification."""
        return all(
            cstore.pre_unify_check(self.data, lvar, value)
            for cstore in self.constraints.values()
        )

    def post_unify_checks(self, lvar_map, lvar, value):
        """Check constraints and return an updated state and constraints.

        Returns
        -------
        A new `ConstrainedState`, or `False` when any constraint is violated.
        """
        # Copy first: `post_unify_check` may mutate a store's constraints.
        S = self.copy(data=lvar_map)
        if any(
            not cstore.post_unify_check(lvar_map, lvar, value, old_state=S)
            for cstore in S.constraints.values()
        ):
            return False

        return S

    def copy(self, data=None):
        """Copy this state, optionally replacing its data with `data`."""
        if data is None:
            data = self.data.copy()
        return type(self)(
            data, constraints={k: v.copy() for k, v in self.constraints.items()}
        )

    def __eq__(self, other):
        if isinstance(other, ConstrainedState):
            return self.data == other.data and self.constraints == other.constraints

        # A constraint-free state is comparable to a plain mapping.
        if isinstance(other, Mapping) and not self.constraints:
            return self.data == other

        return False

    def __repr__(self):
        return f"ConstrainedState({repr(self.data)}, {self.constraints})"
def unify_ConstrainedState(u, v, S):
    """Unify two terms under a `ConstrainedState`, enforcing its constraints.

    Runs the pre-unification checks, performs the raw unification against the
    state's data, then runs the post-unification checks.  Returns the updated
    state, or `False` on any failure.
    """
    if not S.pre_unify_checks(u, v):
        return False

    s = unify(u, v, S.data)
    if s is False:
        return False

    # `post_unify_checks` already returns either a new state or `False`.
    return S.post_unify_checks(s, u, v)


unify.add((object, object, ConstrainedState), unify_ConstrainedState)
class ConstrainedVar(Var):
    """A logic variable that tracks its own constraints.

    Currently, this is only for display/reification purposes.
    """

    __slots__ = ("S", "var")

    def __init__(self, var, S):
        # Hold weak references so that wrapping a variable does not keep the
        # state (or the variable) alive.
        self.S = weakref.ref(S)
        self.token = var.token
        self.var = weakref.ref(var)

    def __repr__(self):
        S = self.S()
        var = self.var()
        res = super().__repr__()
        if S is not None and var is not None:
            u_constraints = ",".join(
                [c.constraints_str(var) for c in S.constraints.values()]
            )
            return f"{res}: {{{u_constraints}}}"
        else:
            # The referents were garbage collected; fall back to the plain
            # `Var` representation.
            return res

    def __eq__(self, other):
        if type(other) == type(self):
            return self.S == other.S and self.token == other.token
        elif type(other) == Var:
            # NOTE: A more valid comparison is same token and no constraints.
            return self.token == other.token
        return NotImplemented

    def __hash__(self):
        # Hash like a plain `Var` with the same token so that constrained and
        # unconstrained variables collide in mappings.
        return hash((Var, self.token))
def _reify_ConstrainedState(u, S):
    """Reify a logic variable against a `ConstrainedState`.

    If `u` does not walk to another value in `S`, it is wrapped in a
    `ConstrainedVar` so that its constraints are shown by `repr`.
    """
    u_res = walk(u, S.data)

    if u_res is u:
        # `u` is unbound in `S`; attach the state for display purposes.
        yield ConstrainedVar(u_res, S)
    else:
        yield _reify(u_res, S)


# Dispatch `unification.core._reify` to this implementation for
# `(Var, ConstrainedState)` pairs.
_reify.add((Var, ConstrainedState), _reify_ConstrainedState)
class DisequalityStore(ConstraintStore):
    """A disequality constraint (i.e. two things do not unify)."""

    op_str = "neq"

    def __init__(self, lvar_constraints=None):
        super().__init__(lvar_constraints)

    def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=None):
        """Re-check all disequality constraints against the new unifications.

        Returns `False` on a definite violation; otherwise prunes satisfied
        constraints, records newly implied ones, and returns `True`.
        """
        # Iterate over a snapshot, since entries may be deleted below.
        for lv_key, constraints in list(self.lvar_constraints.items()):
            lv = reify(lv_key, lvar_map)
            constraints_rf = reify(tuple(constraints), lvar_map)

            for cs in constraints_rf:
                # Probe-unify in an empty state to see what it would take for
                # `lv` and `cs` to become equal.
                s = unify(lv, cs, {})

                if s is not False and not s:
                    # They already unify, but with no unground logic variables,
                    # so we have an immediate violation of the constraint.
                    return False
                elif s is False:
                    # They don't unify and have no unground logic variables, so
                    # the constraint is immediately satisfied and there's no
                    # reason to continue checking this constraint.
                    constraints.discard(cs)
                else:
                    # They unify when/if the unifications in `s` are made, so
                    # let's add these as new constraints.
                    for k, v in s.items():
                        self.add(k, v)

            if len(constraints) == 0:
                # This logic variable has no more unground constraints, so
                # remove it.
                del self.lvar_constraints[lv_key]

        return True

    def pre_unify_check(self, lvar_map, lvar=None, value=None):
        """Disequality imposes no pre-unification restrictions."""
        return True
def neq(u, v):
    """Construct a disequality goal.

    The goal succeeds when `u` and `v` cannot unify; when they *could* unify
    via some unground logic variables, those associations are recorded in the
    state's `DisequalityStore` so the constraint is enforced by later
    unifications.
    """

    def neq_goal(S):
        nonlocal u, v

        u_rf, v_rf = reify((u, v), S)

        # Get the unground logic variables that would unify the two objects;
        # these are all the logic variables that we can't let unify.
        s_uv = unify(u_rf, v_rf, {})

        if s_uv is False:
            # They don't unify and have no unground logic variables, so the
            # constraint is immediately satisfied.
            yield S
            return
        elif not s_uv:
            # They already unify, but with no unground logic variables, so we
            # have an immediate violation of the constraint.
            return

        if not isinstance(S, ConstrainedState):
            S = ConstrainedState(S)

        cs = S.constraints.setdefault(DisequalityStore, DisequalityStore())

        # Record each offending association as a constraint.
        for lvar, obj in s_uv.items():
            cs.add(lvar, obj)

        # We need to check the current state for validity.
        if cs.post_unify_check(S.data):
            yield S

    return neq_goal
class PredicateStore(ConstraintStore, ABC):
    """An abstract store for testing simple predicates."""

    # Require that all constraints be satisfied for a term; otherwise, succeed
    # if only one is satisfied.
    require_all_constraints = True

    # @abstractmethod
    # def cterm_type_check(self, lvt):
    #     """Check the type of the constrained term when it's ground."""
    #     raise NotImplementedError()

    @abstractmethod
    def cparam_type_check(self, lvt):
        """Check the type of the constraint parameter when it's ground."""
        raise NotImplementedError()

    @abstractmethod
    def constraint_check(self, lv, lvt):
        """Check the constrained term against the constraint parameters when they're ground.

        I.e. test the constraint.
        """
        raise NotImplementedError()

    @abstractmethod
    def constraint_isground(self, lv, lvar_map):
        """Check whether or not the constrained term is "ground enough" to be checked."""  # noqa: E501
        raise NotImplementedError()

    def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=None):
        """Re-evaluate every predicate constraint against the new unifications.

        Constraints that are fully ground and satisfied are removed; a ground
        constraint that fails makes the whole check fail; constraints that are
        not yet ground are kept for later checks.
        """
        # Iterate over a snapshot, since entries may be deleted below.
        for lv_key, constraints in list(self.lvar_constraints.items()):
            lv = reify(lv_key, lvar_map)

            is_lv_ground = self.constraint_isground(lv, lvar_map) or isground(
                lv, lvar_map
            )

            if not is_lv_ground:
                # This constraint isn't ready to be checked
                continue

            # if is_lv_ground and not self.cterm_type_check(lv):
            #     self.lvar_constraints[lv_key]
            #     return False

            # Partition the (reified) constraint parameters by groundedness.
            constraint_grps = groupby(
                lambda x: isground(x, lvar_map), reify(iter(constraints), lvar_map)
            )

            constraints_unground = constraint_grps.get(False, ())
            constraints_ground = constraint_grps.get(True, ())

            if len(constraints_ground) > 0 and not all(
                self.cparam_type_check(c) for c in constraints_ground
            ):
                # Some constraint parameters aren't the correct type, so fail.
                # del self.lvar_constraints[lv_key]
                return False

            assert constraints_unground or constraints_ground

            if is_lv_ground and len(constraints_unground) == 0:
                if self.require_all_constraints and any(
                    not self.constraint_check(lv, t) for t in constraints_ground
                ):
                    return False
                elif not self.require_all_constraints and not any(
                    self.constraint_check(lv, t) for t in constraints_ground
                ):
                    return False

                # The instance and constraint parameters are all ground and the
                # constraint is satisfied, so, since nothing should change from
                # here on, we can remove the constraint.
                del self.lvar_constraints[lv_key]

        # Some types are unground, so we continue checking until they are
        return True

    def pre_unify_check(self, lvar_map, lvar=None, value=None):
        """Predicate constraints impose no pre-unification restrictions."""
        return True
class TypeStore(PredicateStore):
    """A constraint store for asserting object types."""

    # Every type constraint on a term must hold.
    require_all_constraints = True

    op_str = "typeo"

    def __init__(self, lvar_constraints=None):
        super().__init__(lvar_constraints)

    def add(self, lvt, cparams):
        """Add a type constraint for `lvt`; at most one is allowed per term."""
        if lvt in self.lvar_constraints:
            raise ValueError("Only one type constraint can be applied to a term")
        return super().add(lvt, cparams)

    # def cterm_type_check(self, lvt):
    #     return True

    def cparam_type_check(self, x):
        """Check that the constraint parameter is itself a type."""
        return isinstance(x, type)

    def constraint_check(self, x, cx):
        # Exact type equality (not `isinstance`) — subclasses do not satisfy
        # a `typeo` constraint.
        return type(x) == cx

    def constraint_isground(self, lv, lvar_map):
        # Variables and cons-pairs may still become more ground.
        return not (isinstance(lv, Var) or issubclass(type(lv), ConsPair))
def typeo(u, u_type):
    """Construct a goal specifying the type of a term.

    If both `u` and `u_type` are ground, the check `type(u) == u_type` is
    performed immediately; otherwise, the constraint is stored in the state's
    `TypeStore` and re-checked as terms become ground.
    """

    def typeo_goal(S):
        nonlocal u, u_type

        u_rf, u_type_rf = reify((u, u_type), S)

        if not isground(u_rf, S) or not isground(u_type_rf, S):
            if not isinstance(S, ConstrainedState):
                S = ConstrainedState(S)

            cs = S.constraints.setdefault(TypeStore, TypeStore())

            try:
                cs.add(u_rf, u_type_rf)
            except TypeError:
                # If the instance object can't be hashed, we can simply use a
                # logic variable to uniquely identify it.
                u_lv = var()
                S[u_lv] = u_rf
                cs.add(u_lv, u_type_rf)

            # The new constraint may already be violated by the current state.
            if cs.post_unify_check(S.data, u_rf, u_type_rf):
                yield S
        elif isinstance(u_type_rf, type) and type(u_rf) == u_type_rf:
            # Fully ground: check the type directly.
            yield S

    return typeo_goal
class IsinstanceStore(PredicateStore):
    """A constraint store for asserting object instance types."""

    op_str = "isinstanceo"

    # Satisfying any one constraint is good enough
    require_all_constraints = False

    def __init__(self, lvar_constraints=None):
        super().__init__(lvar_constraints)

    # def cterm_type_check(self, lvt):
    #     return True

    def cparam_type_check(self, lvt):
        """Check that the constraint parameter is itself a type."""
        return isinstance(lvt, type)

    def constraint_check(self, lv, lvt):
        return isinstance(lv, lvt)

    def constraint_isground(self, lv, lvar_map):
        # Variables and cons-pairs may still become more ground.
        return not (isinstance(lv, Var) or issubclass(type(lv), ConsPair))
def isinstanceo(u, u_type):
    """Construct a goal specifying that a term is an instance of a type.

    Only a single instance type can be assigned per goal, i.e.

        lany(isinstanceo(var(), list),
             isinstanceo(var(), tuple))

    and not

        isinstanceo(var(), (list, tuple))
    """

    def isinstanceo_goal(S):
        nonlocal u, u_type

        u_rf, u_type_rf = reify((u, u_type), S)

        if not isground(u_rf, S) or not isground(u_type_rf, S):
            if not isinstance(S, ConstrainedState):
                S = ConstrainedState(S)

            cs = S.constraints.setdefault(IsinstanceStore, IsinstanceStore())

            try:
                cs.add(u_rf, u_type_rf)
            except TypeError:
                # If the instance object can't be hashed, we can simply use a
                # logic variable to uniquely identify it.
                u_lv = var()
                S[u_lv] = u_rf
                cs.add(u_lv, u_type_rf)

            # The new constraint may already be violated by the current state.
            if cs.post_unify_check(S.data, u_rf, u_type_rf):
                yield S
        # elif isground(u_type, S):
        #     yield from lany(eq(u_type, u_t) for u_t in type(u).mro())(S)
        elif (
            isinstance(u_type_rf, type)
            # or (
            #     isinstance(u_type, Iterable)
            #     and all(isinstance(t, type) for t in u_type)
            # )
        ) and isinstance(u_rf, u_type_rf):
            # Fully ground: check the instance relation directly.
            yield S

    return isinstanceo_goal
================================================
FILE: kanren/core.py
================================================
from collections.abc import Sequence
from functools import partial, reduce
from itertools import tee
from operator import length_hint
from typing import (
Any,
Callable,
Iterable,
Iterator,
Literal,
MutableMapping,
Optional,
Tuple,
Union,
cast,
)
from cons.core import ConsPair
from toolz import interleave, take
from unification import isvar, reify, unify
from unification.core import isground
StateType = Union[MutableMapping, Literal[False]]
StateStreamType = Iterator[StateType]
GoalType = Callable[[StateType], StateStreamType]
def fail(s: StateType) -> Iterator[StateType]:
    """A goal that always fails, producing an empty state stream."""
    return iter([])
def succeed(s: StateType) -> Iterator[StateType]:
    """A goal that always succeeds, producing the input state unchanged."""
    return iter([s])
def eq(u: Any, v: Any) -> GoalType:
    """Construct a goal stating that its arguments must unify.

    See Also
    --------
    unify
    """

    def eq_goal(s: StateType) -> StateStreamType:
        s_new = unify(u, v, s)
        # Failure yields an empty stream; success yields exactly one state.
        return iter(() if s_new is False else (s_new,))

    return eq_goal
def ldisj_seq(goals: Iterable[GoalType]) -> GoalType:
    """Produce a goal that returns the appended state stream from all successful goal arguments.

    In other words, it behaves like logical disjunction/OR for goals.
    """  # noqa: E501

    # A provably empty sequence of goals is a trivial success.
    if length_hint(goals, -1) == 0:
        return succeed

    assert isinstance(goals, Iterable)

    def ldisj_seq_goal(S: StateType) -> StateStreamType:
        nonlocal goals

        # `tee` so the goal can be applied to multiple states without
        # exhausting a one-shot iterable of goals.
        goals, _goals = tee(goals)

        yield from interleave(g(S) for g in _goals)

    return ldisj_seq_goal
def bind(z: StateStreamType, g: GoalType) -> StateStreamType:
    """Apply a goal to a state stream and then combine the resulting state streams."""
    # We could also use `chain`, but `interleave` preserves the old behavior.
    # `interleave` alternates between the per-state result streams instead of
    # exhausting each one in turn.
    # return chain.from_iterable(map(g, z))
    return cast(StateStreamType, interleave(map(g, z)))
def lconj_seq(goals: Iterable[GoalType]) -> GoalType:
    """Produce a goal that returns the appended state stream in which all goals are necessarily successful.

    In other words, it behaves like logical conjunction/AND for goals.
    """  # noqa: E501

    # A provably empty sequence of goals is a trivial success.
    if length_hint(goals, -1) == 0:
        return succeed

    assert isinstance(goals, Iterable)

    def lconj_seq_goal(S: StateType) -> StateStreamType:
        nonlocal goals

        # `tee` so the goal can be applied to multiple states without
        # exhausting a one-shot iterable of goals.
        goals, _goals = tee(goals)

        g0 = next(_goals, None)

        if g0 is None:
            # No goals at all (and `length_hint` couldn't tell us earlier):
            # produce an empty stream.
            return

        # Thread the first goal's stream through each remaining goal.
        yield from reduce(bind, _goals, g0(S))

    return lconj_seq_goal
def ldisj(*goals: Union[GoalType, Iterable[GoalType]]) -> GoalType:
    """Form a disjunction of goals.

    A single non-goal iterable argument is treated as a sequence of goals.
    """
    if len(goals) == 1:
        (g0,) = goals
        if isinstance(g0, Iterable):
            return ldisj_seq(g0)

    return ldisj_seq(cast(Tuple[GoalType, ...], goals))
def lconj(*goals: Union[GoalType, Iterable[GoalType]]) -> GoalType:
    """Form a conjunction of goals.

    A single non-goal iterable argument is treated as a sequence of goals.
    """
    if len(goals) == 1:
        (g0,) = goals
        if isinstance(g0, Iterable):
            return lconj_seq(g0)

    return lconj_seq(cast(Tuple[GoalType, ...], goals))
def conde(
    *goals: Union[Iterable[GoalType], Iterator[Iterable[GoalType]]]
) -> Union[GoalType, StateStreamType]:
    """Form a disjunction of goal conjunctions.

    Each argument is a sequence of goals combined with `lconj_seq`
    (conjunction); the resulting branches are combined with `ldisj_seq`
    (disjunction).  A single iterator argument is treated as an iterator of
    such conjunctive branches.
    """
    if len(goals) == 1 and isinstance(goals[0], Iterator):
        return ldisj_seq(
            lconj_seq(g) for g in cast(Iterator[Iterable[GoalType]], goals[0])
        )

    return ldisj_seq(lconj_seq(g) for g in cast(Tuple[Iterable[GoalType], ...], goals))
# `lall`/`lany` are the traditional miniKanren names for goal conjunction and
# disjunction; they are aliases of `lconj` and `ldisj`, respectively.
lall = lconj
lany = ldisj
def ground_order_key(S: StateType, x: Any) -> Literal[-1, 0, 1, 2]:
    """Sort key that ranks a term by groundedness.

    Returns -1 for fully ground terms, 0 for other non-cons terms, 1 for
    cons-pairs (partially ground sequences), and 2 for logic variables, so
    that sorting puts the most ground terms first.
    """
    if isvar(x):
        return 2
    elif isground(x, S):
        return -1
    elif issubclass(type(x), ConsPair):
        return 1
    else:
        return 0
def ground_order(in_args: Any, out_args: Any) -> GoalType:
    """Construct a non-relational goal that orders a list of terms based on groundedness (grounded precede ungrounded)."""  # noqa: E501

    def ground_order_goal(S: StateType) -> StateStreamType:
        nonlocal in_args, out_args

        in_args_rf, out_args_rf = reify((in_args, out_args), S)

        # Unify `out_args` with `in_args` sorted most-ground-first (see
        # `ground_order_key`).
        S_new = unify(
            # The `list` here is to make sure that the two terms/arguments
            # have the same container type
            list(out_args_rf) if isinstance(out_args_rf, Sequence) else out_args_rf,
            sorted(in_args_rf, key=partial(ground_order_key, S)),
            S,
        )

        if S_new is not False:
            yield S_new

    return ground_order_goal
def ifa(g1: GoalType, g2: GoalType) -> GoalType:
    """Create a goal operator that returns the first stream unless it fails."""

    def ifa_goal(S: StateType) -> StateStreamType:
        first_stream = g1(S)
        # Probe the first stream; `None` signals that it produced nothing.
        head = next(first_stream, None)

        if head is not None:
            yield head
            yield from first_stream
        else:
            # The first goal failed, so fall back to the second.
            yield from g2(S)

    return ifa_goal
def Zzz(gctor: Callable[[Any], GoalType], *args, **kwargs) -> GoalType:
    """Create an inverse-η-delay for a goal.

    Construction of the goal `gctor(*args, **kwargs)` is deferred until the
    returned goal is applied to a state.
    """

    def Zzz_goal(S: StateType) -> StateStreamType:
        goal = gctor(*args, **kwargs)
        yield from goal(S)

    return Zzz_goal
def run(
    n: Union[None, int],
    x: Any,
    *goals: GoalType,
    results_filter: Optional[Callable[[Iterator[Any]], Any]] = None
) -> Union[Tuple[Any, ...], Iterator[Any]]:
    """Run a logic program and obtain `n` solutions that satisfy the given goals.

    >>> from kanren import run, var, eq
    >>> x = var()
    >>> run(1, x, eq(x, 1))
    (1,)

    Parameters
    ----------
    n
        The number of desired solutions. ``n=0`` returns a tuple with all
        results and ``n=None`` returns a lazy sequence of all results.
    x
        The form to reify and return. Usually contains logic variables used in
        the given goals.
    goals
        A sequence of goals that must be true in logical conjunction
        (i.e. `lall`).
    results_filter
        A function to apply to the results stream (e.g. a `unique` filter).

    Returns
    -------
    Either an iterable or tuple of reified `x` values that satisfy the goals.
    """
    # Conjoin the goals, evaluate them against an empty state, and reify `x`
    # against each resulting state.
    states = lall(*goals)({})
    results = map(partial(reify, x), states)

    if results_filter is not None:
        results = results_filter(results)

    if n is None:
        return results

    return tuple(results) if n == 0 else tuple(take(n, results))
def dbgo(*args: Any, msg: Optional[Any] = None) -> GoalType:  # pragma: no cover
    """Construct a goal that sets a debug trace and prints reified arguments."""
    from pprint import pprint

    def dbgo_goal(S: StateType) -> StateStreamType:
        nonlocal args

        args = reify(args, S)

        if msg is not None:
            print(msg)

        pprint(args)

        # Drop into the debugger, then pass the state through unchanged.
        import pdb

        pdb.set_trace()
        yield S

    return dbgo_goal
================================================
FILE: kanren/facts.py
================================================
from toolz import merge
from unification import reify, unify
from .util import intersection
class Relation(object):
    """A relation whose facts are stored and indexed in memory.

    Facts are tuples of terms; each fact is indexed under every one of its
    `(position, value)` pairs so that queries with ground arguments can
    pre-filter the candidate facts.
    """

    # Counter used to generate default relation names.
    _id = 0

    def __init__(self, name=None):
        self.facts = set()
        self.index = dict()
        if not name:
            name = "_%d" % Relation._id
            Relation._id += 1
        self.name = name

    def add_fact(self, *inputs):
        """Add a fact to the knowledge-base.

        See Also
        --------
        fact
        facts
        """
        fact = tuple(inputs)

        self.facts.add(fact)

        # Index the fact under each of its `(position, value)` pairs.
        for key in enumerate(inputs):
            if key not in self.index:
                self.index[key] = set()
            self.index[key].add(fact)

    def __call__(self, *args):
        """Return a goal that produces a list of substitutions matching a fact in the knowledge-base.

        >>> from kanren.facts import Relation
        >>> from unification import var
        >>>
        >>> x, y = var('x'), var('y')
        >>> r = Relation()
        >>> r.add_fact(1, 2, 3)
        >>> r.add_fact(4, 5, 6)
        >>> list(r(x, y, 3)({})) == [{y: 2, x: 1}]
        True
        >>> list(r(x, 5, y)({})) == [{y: 6, x: 4}]
        True
        >>> list(r(x, 42, y)({}))
        []

        Parameters
        ----------
        *args:
            The goal to evaluate. This consists of vars and values to match
            facts against.
        """  # noqa: E501

        def goal(substitution):
            args2 = reify(args, substitution)
            subsets = [self.index[key] for key in enumerate(args) if key in self.index]
            if subsets:  # we are able to reduce the pool early
                # Intersect from smallest subset to largest.
                facts = intersection(*sorted(subsets, key=len))
            else:
                facts = self.facts
            for fact in facts:
                unified = unify(fact, args2, substitution)
                # `unify` returns `False` on failure; an empty mapping is a
                # valid success, so use an identity check (consistent with
                # `kanren.core`) instead of `!= False` (E712).
                if unified is not False:
                    yield merge(unified, substitution)

        return goal

    def __str__(self):
        return f"Rel: {self.name}"

    def __repr__(self):
        return f"{type(self).__name__}({self.name}, {self.index}, {self.facts})"
def fact(rel, *args):
    """Declare a fact.

    >>> from kanren import fact, Relation, var, run
    >>> parent = Relation()
    >>> fact(parent, "Homer", "Bart")
    >>> fact(parent, "Homer", "Lisa")
    >>> x = var()
    >>> run(1, x, parent(x, "Bart"))
    ('Homer',)

    Parameters
    ----------
    rel: Relation
        The relation to which the fact is added.
    *args:
        The terms that make up the fact.
    """
    rel.add_fact(*args)
def facts(rel, *lists):
    """Declare several facts.

    >>> from kanren import fact, Relation, var, run
    >>> parent = Relation()
    >>> facts(parent, ("Homer", "Bart"),
    ...               ("Homer", "Lisa"))
    >>> x = var()
    >>> run(1, x, parent(x, "Bart"))
    ('Homer',)

    Parameters
    ----------
    rel: Relation
        The relation to which the facts are added.
    *lists:
        Iterables, each providing the terms of one fact.
    """
    for lst in lists:
        fact(rel, *lst)
================================================
FILE: kanren/goals.py
================================================
from collections import Counter
from collections.abc import Sequence
from functools import partial
from itertools import permutations
from operator import length_hint
from cons import cons
from cons.core import ConsNull, ConsPair
from unification import reify, var
from unification.core import isground
from .core import conde, eq, lall, lany
def heado(head, coll):
    """Construct a goal stating that head is the head of coll.

    See Also
    --------
    tailo
    conso
    """
    # The tail is left as a fresh logic variable.
    tail = var()
    return eq(cons(head, tail), coll)
def tailo(tail, coll):
    """Construct a goal stating that tail is the tail of coll.

    See Also
    --------
    heado
    conso
    """
    # The head is left as a fresh logic variable.
    head = var()
    return eq(cons(head, tail), coll)
def conso(h, t, r):
    """Construct a goal stating that cons h + t == r."""
    pair = cons(h, t)
    return eq(pair, r)
def nullo(*args, refs=None, default_ConsNull=list):
    """Create a goal asserting that one or more terms are a/the same `ConsNull` type.

    `ConsNull` types return proper Python collections when used as a CDR value
    in a CONS (e.g. `cons(1, []) == [1]`).

    This goal doesn't require that all args be unifiable; only that they have
    the same `ConsNull` type.  Unlike the classic `lall(eq(x, []), eq(y, x))`
    `conde`-branch idiom used when recursively walking a single sequence via
    `conso`, this allows us to perform the same essential function while
    walking distinct lists that do not necessarily terminate on the same
    iteration.

    Parameters
    ----------
    args: tuple of objects
        The terms to consider as an instance of the `ConsNull` type
    refs: tuple of objects
        The terms to use as reference types.  These are not unified with the
        `ConsNull` type, instead they are used to constrain the `ConsNull`
        types considered valid.
    default_ConsNull: type
        The sequence type to use when all logic variables are unground.
    """

    def nullo_goal(s):
        nonlocal args, default_ConsNull

        if refs is not None:
            refs_rf = reify(refs, s)
        else:
            refs_rf = ()

        args_rf = reify(args, s)

        arg_null_types = set(
            # Collect the `ConsNull` type of each argument/reference
            type(a)
            for a in args_rf + refs_rf
            # `ConsPair` and `ConsNull` types that are not literally `ConsPair`s
            if isinstance(a, (ConsPair, ConsNull)) and not issubclass(type(a), ConsPair)
        )

        try:
            null_type = arg_null_types.pop()
        except KeyError:
            # Nothing constrains the null type, so use the default.
            null_type = default_ConsNull

        if len(arg_null_types) > 0 and any(a != null_type for a in arg_null_types):
            # Mismatching null types: fail.
            return

        # Unify every argument with an empty instance of the null type.
        g = lall(*[eq(a, null_type()) for a in args_rf])

        yield from g(s)

    return nullo_goal
def itero(lst, nullo_refs=None, default_ConsNull=list):
    """Construct a goal asserting that a term is an iterable type.

    This is a generic version of the standard `listo` that accounts for
    different iterable types supported by `cons` in Python.

    See `nullo`
    """

    def itero_goal(S):
        nonlocal lst, nullo_refs, default_ConsNull
        l_rf = reify(lst, S)
        c, d = var(), var()
        # Either the term is a/the null (empty) collection, or it is a cons
        # pair whose tail is itself an iterable.
        g = conde(
            [nullo(l_rf, refs=nullo_refs, default_ConsNull=default_ConsNull)],
            [conso(c, d, l_rf), itero(d, default_ConsNull=default_ConsNull)],
        )
        yield from g(S)

    return itero_goal
def membero(x, ls):
    """Construct a goal stating that `x` is an item of the collection `ls`."""

    def membero_goal(S):
        nonlocal x, ls

        x_rf, ls_rf = reify((x, ls), S)
        a, d = var(), var()

        # `ls` is a cons pair whose head is `x`, or whose tail contains `x`.
        # NOTE: The original built the goal from the un-reified `x`/`ls`,
        # leaving `x_rf`/`ls_rf` unused; using the reified values is
        # equivalent (unification walks terms) and matches the other goals in
        # this module.
        g = lall(conso(a, d, ls_rf), conde([eq(a, x_rf)], [membero(x_rf, d)]))

        yield from g(S)

    return membero_goal
def appendo(lst, s, out, default_ConsNull=list):
    """Construct a goal for the relation lst + s = ls.

    See Byrd thesis pg. 247
    https://scholarworks.iu.edu/dspace/bitstream/handle/2022/8777/Byrd_indiana_0093A_10344.pdf
    """

    def appendo_goal(S):
        nonlocal lst, s, out

        l_rf, s_rf, out_rf = reify((lst, s, out), S)

        a, d, res = var(prefix="a"), var(prefix="d"), var(prefix="res")

        _nullo = partial(nullo, default_ConsNull=default_ConsNull)

        g = conde(
            [
                # All empty
                _nullo(s_rf, l_rf, out_rf),
            ],
            [
                # `lst` is empty
                conso(a, d, out_rf),
                eq(s_rf, out_rf),
                _nullo(l_rf, refs=(s_rf, out_rf)),
            ],
            [
                # Recursive case: `lst` and `out` share head `a`, and the
                # tail of `out` is the tail of `lst` appended to `s`.
                conso(a, d, l_rf),
                conso(a, res, out_rf),
                appendo(d, s_rf, res, default_ConsNull=default_ConsNull),
            ],
        )

        yield from g(S)

    return appendo_goal
def rembero(x, lst, o, default_ConsNull=list):
    """Remove the first occurrence of `x` in `lst` resulting in `o`."""
    # Local import — `neq` lives in `kanren.constraints`.
    from .constraints import neq

    def rembero_goal(s):
        nonlocal x, lst, o

        x_rf, l_rf, o_rf = reify((x, lst, o), s)

        l_car, l_cdr, r = var(), var(), var()

        g = conde(
            [
                # Both sequences are empty; nothing to remove.
                nullo(l_rf, o_rf, default_ConsNull=default_ConsNull),
            ],
            [
                # The head of `lst` is `x`: the result is simply its tail.
                conso(l_car, l_cdr, l_rf),
                eq(x_rf, l_car),
                eq(l_cdr, o_rf),
            ],
            [
                # The head is not `x`: keep it and recurse into the tail.
                conso(l_car, l_cdr, l_rf),
                neq(l_car, x),
                conso(l_car, r, o_rf),
                rembero(x_rf, l_cdr, r, default_ConsNull=default_ConsNull),
            ],
        )

        yield from g(s)

    return rembero_goal
def permuteo(a, b, inner_eq=eq, default_ConsNull=list, no_ident=False):
    """Construct a goal asserting equality of sequences under permutation.

    For example, (1, 2, 2) equates to (2, 1, 2) under permutation

    >>> from kanren import var, run, permuteo
    >>> x = var()
    >>> run(0, x, permuteo(x, (1, 2)))
    ((1, 2), (2, 1))

    >>> run(0, x, permuteo((2, 1, x), (2, 1, 2)))
    (2,)

    Parameters
    ----------
    a, b: object
        The two sequences (or logic variables) to relate.
    inner_eq: callable
        The goal used to equate elements/permutations; defaults to `eq`.
    default_ConsNull: type
        The collection type used when neither argument is a proper sequence.
    no_ident: bool
        If `True`, exclude the identity permutation (i.e. `a == b`).
    """

    def permuteo_goal(S):
        nonlocal a, b, default_ConsNull, inner_eq

        a_rf, b_rf = reify((a, b), S)

        # If the lengths differ, then fail
        a_len, b_len = length_hint(a_rf, -1), length_hint(b_rf, -1)
        if a_len > 0 and b_len > 0 and a_len != b_len:
            return

        if isinstance(a_rf, Sequence):

            a_type = type(a_rf)
            a_perms = permutations(a_rf)

            if no_ident:
                # The first permutation is the identity; skip it.
                next(a_perms)

            if isinstance(b_rf, Sequence):
                b_type = type(b_rf)

                # Fail on mismatched types or straight equality (when
                # `no_ident` is enabled)
                if a_type != b_type or (no_ident and a_rf == b_rf):
                    return

                try:
                    # `a` and `b` are sequences, so let's see if we can pull out
                    # all the (hash-)equivalent elements.
                    # XXX: Use of this requires that the equivalence relation
                    # implied by `inner_eq` be a *superset* of `eq`.
                    cntr_a, cntr_b = Counter(a_rf), Counter(b_rf)
                    rdcd_a, rdcd_b = cntr_a - cntr_b, cntr_b - cntr_a

                    if len(rdcd_a) == len(rdcd_b) == 0:
                        # The sequences are equal as multisets; done.
                        yield S
                        return
                    elif len(rdcd_a) < len(cntr_a):
                        # Some elements matched; only permute the remainder.
                        a_rf, b_rf = tuple(rdcd_a.elements()), b_type(rdcd_b.elements())
                        a_perms = permutations(a_rf)

                except TypeError:
                    # TODO: We could probably get more coverage for this case
                    # by using `HashableForm`.
                    pass

                # If they're both ground and we're using basic unification,
                # then simply check that one is a permutation of the other and
                # be done. No need to create and evaluate a bunch of goals in
                # order to do something that can be done right here.
                # Naturally, this assumes that the `isground` checks aren't
                # nearly as costly as all that other stuff. If the gains
                # depend on the sizes of `a` and `b`, then we could do
                # `length_hint` checks first.
                if inner_eq == eq and isground(a_rf, S) and isground(b_rf, S):
                    if tuple(b_rf) in a_perms:
                        yield S
                        return
                    else:
                        # This has to be a definitive check, since we can only
                        # use the `a_perms` generator once; plus, we don't want
                        # to iterate over it more than once!
                        return

            # Equate `b` with each permutation of `a`.
            yield from lany(inner_eq(b_rf, a_type(i)) for i in a_perms)(S)

        elif isinstance(b_rf, Sequence):

            b_type = type(b_rf)
            b_perms = permutations(b_rf)

            if no_ident:
                # The first permutation is the identity; skip it.
                next(b_perms)

            yield from lany(inner_eq(a_rf, b_type(i)) for i in b_perms)(S)

        else:

            # None of the arguments are proper sequences, so state that one
            # should be and apply `permuteo` to that.
            a_itero_g = itero(
                a_rf, nullo_refs=(b_rf,), default_ConsNull=default_ConsNull
            )

            for S_new in a_itero_g(S):
                a_new = reify(a_rf, S_new)
                a_type = type(a_new)
                a_perms = permutations(a_new)

                if no_ident:
                    next(a_perms)

                yield from lany(inner_eq(b_rf, a_type(i)) for i in a_perms)(S_new)

    return permuteo_goal
# For backward compatibility: `permuteq` is the legacy name for `permuteo`.
permuteq = permuteo
================================================
FILE: kanren/graph.py
================================================
from functools import partial
from etuples import etuple
from unification import isvar, reify, var
from .core import Zzz, conde, eq, fail, ground_order, lall, succeed
from .goals import conso, nullo
from .term import applyo
def mapo(relation, a, b, null_type=list, null_res=True, first=True):
    """Apply a relation to corresponding elements in two sequences and succeed if the relation succeeds for all pairs."""  # noqa: E501
    b_car, b_cdr = var(), var()
    a_car, a_cdr = var(), var()

    return conde(
        # Both sequences are empty.  On the first call this branch is gated by
        # `null_res`; on recursive calls (`first=False`) it is always allowed,
        # which is what terminates the recursion.
        [nullo(a, b, default_ConsNull=null_type) if (not first or null_res) else fail],
        [
            conso(a_car, a_cdr, a),
            conso(b_car, b_cdr, b),
            # Relate the heads...
            Zzz(relation, a_car, b_car),
            # ...and recurse into the tails.
            Zzz(mapo, relation, a_cdr, b_cdr, null_type=null_type, first=False),
        ],
    )
def map_anyo(
    relation, a, b, null_type=list, null_res=False, first=True, any_succeed=False
):
    """Apply a relation to corresponding elements in two sequences and succeed if at least one pair succeeds.

    Parameters
    ----------
    relation: callable
        The goal constructor applied to corresponding element pairs.
    a, b: objects
        The two sequences to relate.
    null_type: type, optional
        The `ConsNull` type used when the collection type cannot be inferred
        from the inputs (passed to `nullo` as `default_ConsNull`); defaults to
        `list`.
    null_res: bool, optional
        Whether empty sequences may satisfy the relation on the initial call.
    first: bool, optional
        Whether this is the initial (non-recursive) call; recursive calls
        pass `False`.
    any_succeed: bool, optional
        Whether `relation` has already succeeded for a preceding pair;
        termination on empty tails is only allowed once it has (or when
        `first and null_res`).
    """  # noqa: E501
    b_car, b_cdr = var(), var()
    a_car, a_cdr = var(), var()

    return conde(
        [
            # Terminate on empty sequences, but only once some pair has
            # succeeded (or, on the first call, if `null_res` allows it).
            nullo(a, b, default_ConsNull=null_type)
            if (any_succeed or (first and null_res))
            else fail
        ],
        [
            conso(a_car, a_cdr, a),
            conso(b_car, b_cdr, b),
            conde(
                [
                    # The relation holds between the heads.
                    Zzz(relation, a_car, b_car),
                    Zzz(
                        map_anyo,
                        relation,
                        a_cdr,
                        b_cdr,
                        null_type=null_type,
                        any_succeed=True,
                        first=False,
                    ),
                ],
                [
                    # The heads are simply equal.
                    eq(a_car, b_car),
                    Zzz(
                        map_anyo,
                        relation,
                        a_cdr,
                        b_cdr,
                        null_type=null_type,
                        any_succeed=any_succeed,
                        first=False,
                    ),
                ],
            ),
        ],
    )
def vararg_success(*args):
    """A relation that accepts any arguments and always succeeds."""
    return succeed
def eq_length(u, v, default_ConsNull=list):
    """Construct a goal stating that two sequences are the same length and type."""
    # `vararg_success` relates any pair of elements, so `mapo` only enforces
    # matching cons structure (i.e. length and collection type).
    return mapo(vararg_success, u, v, null_type=default_ConsNull)
def reduceo(relation, in_term, out_term, *args, **kwargs):
    """Relate a term and the fixed-point of that term under a given relation.

    This includes the "identity" relation.

    Parameters
    ----------
    relation: callable
        A goal constructor relating an input term to an output term; extra
        `args`/`kwargs` are forwarded to it (and to the recursive calls).
    in_term: object
        The term to be reduced.
    out_term: object
        The (eventual) fixed-point of `in_term` under `relation`.
    """

    def reduceo_goal(s):
        nonlocal in_term, out_term, relation, args, kwargs

        in_term_rf, out_term_rf = reify((in_term, out_term), s)

        # The result of reducing the input graph once
        term_rdcd = var()

        # Are we working "backward" and (potentially) "expanding" a graph
        # (e.g. when the relation is a reduction rule)?
        is_expanding = isvar(in_term_rf)

        # One application of the relation assigned to `term_rdcd`
        single_apply_g = relation(in_term_rf, term_rdcd, *args, **kwargs)

        # Assign/equate (unify, really) the result of a single application to
        # the "output" term.
        single_res_g = eq(term_rdcd, out_term_rf)

        # Recurse into applications of the relation (well, produce a goal that
        # will do that)
        another_apply_g = reduceo(relation, term_rdcd, out_term_rf, *args, **kwargs)

        # We want the fixed-point value to show up in the stream output
        # *first*, but that requires some checks.
        if is_expanding:
            # When an un-reduced term is a logic variable (e.g. we're
            # "expanding"), we can't go depth first.
            # We need to draw the association between (i.e. unify) the reduced
            # and expanded terms ASAP, in order to produce finite
            # expanded graphs first and yield results.
            #
            # In other words, there's no fixed-point to produce in this
            # situation. Instead, for example, we have to produce an infinite
            # stream of terms that have `out_term_rf` as a fixed point.
            # g = conde([single_res_g, single_apply_g],
            #           [another_apply_g, single_apply_g])
            g = lall(conde([single_res_g], [another_apply_g]), single_apply_g)
        else:
            # Run the recursion step first, so that we get the fixed-point as
            # the first result
            g = lall(single_apply_g, conde([another_apply_g], [single_res_g]))

        yield from g(s)

    return reduceo_goal
def walko(
    goal,
    graph_in,
    graph_out,
    rator_goal=None,
    null_type=etuple,
    map_rel=partial(map_anyo, null_res=True),
):
    """Apply a binary relation between all nodes in two graphs.

    When `rator_goal` is used, the graphs are treated as term graphs, and the
    multi-functions `rator`, `rands`, and `apply` are used to walk the graphs.
    Otherwise, the graphs must be iterable according to `map_anyo`.

    Parameters
    ----------
    goal: callable
        A goal that is applied to all terms in the graph.
    graph_in: object
        The graph for which the left-hand side of a binary relation holds.
    graph_out: object
        The graph for which the right-hand side of a binary relation holds.
    rator_goal: callable (default None)
        A goal that is applied to the rators of a graph. When specified,
        `goal` is only applied to rands and it must succeed along with the
        rator goal in order to descend into sub-terms.
    null_type: type
        The collection type used when it is not fully determined by the graph
        arguments.
    map_rel: callable
        The map relation used to apply `goal` to a sub-graph.
    """

    def walko_goal(s):
        nonlocal goal, rator_goal, graph_in, graph_out, null_type, map_rel

        graph_in_rf, graph_out_rf = reify((graph_in, graph_out), s)

        rator_in, rands_in, rator_out, rands_out = var(), var(), var(), var()

        # The same walk, partially applied for use on sub-graphs.
        _walko = partial(
            walko, goal, rator_goal=rator_goal, null_type=null_type, map_rel=map_rel
        )

        g = conde(
            # TODO: Use `Zzz`, if needed.
            [
                # `goal` relates the two graphs directly.
                goal(graph_in_rf, graph_out_rf),
            ],
            [
                # Decompose each graph into rator and rands, relate the
                # rators, and recurse into the rands.
                lall(
                    applyo(rator_in, rands_in, graph_in_rf),
                    applyo(rator_out, rands_out, graph_out_rf),
                    rator_goal(rator_in, rator_out),
                    map_rel(_walko, rands_in, rands_out, null_type=null_type),
                )
                if rator_goal is not None
                else map_rel(_walko, graph_in_rf, graph_out_rf, null_type=null_type),
            ],
        )

        yield from g(s)

    return walko_goal
def term_walko(
    rator_goal,
    rands_goal,
    a,
    b,
    null_type=etuple,
    no_ident=False,
    format_step=None,
    **kwargs
):
    """Construct a goal for walking a term graph.

    This implementation is somewhat specific to the needs of `eq_comm` and
    `eq_assoc`, but it could be transferred to `kanren.graph`.

    Parameters
    ----------
    rator_goal: callable
        Unary goal that must succeed for a term's operator before
        `rands_goal` is applied to its operands.
    rands_goal: callable
        Goal relating the operands of the two terms; it also receives the
        (shared) operator and any extra keyword arguments.
    a, b: object
        The two terms being related.
    null_type: type
        Collection type passed through to `map_anyo`.
    no_ident: bool
        When true, the trivial solution `a == b` is excluded.
    format_step: callable (default None)
        Optional goal used to reformat a term before each walk step.

    XXX: Make sure `rator_goal` will succeed for unground logic variables;
    otherwise, this will diverge.
    XXX: `rands_goal` should not contain `eq`, i.e. `rands_goal(x, x)`
    should always fail!
    """

    def single_step(s, t):
        # Relate `s` and `t` by applying `rands_goal` exactly once at the
        # top level; `ground_order` walks the more-ground argument first.
        u, v = var(), var()
        u_rator, u_rands = var(), var()
        v_rands = var()

        return lall(
            ground_order((s, t), (u, v)),
            applyo(u_rator, u_rands, u),
            # Both terms must share the same rator
            applyo(u_rator, v_rands, v),
            rator_goal(u_rator),
            # These make sure that there are at least two rands, which
            # makes sense for commutativity and associativity, at least.
            conso(var(), var(), u_rands),
            conso(var(), var(), v_rands),
            Zzz(rands_goal, u_rands, v_rands, u_rator, **kwargs),
        )

    def term_walko_step(s, t):
        # Recursively relate `s` and `t`: either apply `single_step` at the
        # top level, or walk the sub-terms first and optionally apply it
        # afterward.
        nonlocal rator_goal, rands_goal, null_type
        u, v = var(), var()
        z, w = var(), var()

        return lall(
            ground_order((s, t), (u, v)),
            format_step(u, w) if format_step is not None else eq(u, w),
            conde(
                [
                    # Apply, then walk or return
                    single_step(w, v),
                ],
                [
                    # Walk, then apply or return
                    map_anyo(term_walko_step, w, z, null_type=null_type),
                    conde([eq(z, v)], [single_step(z, v)]),
                ],
            ),
        )

    return lall(
        term_walko_step(a, b)
        if no_ident
        # The identity relation `a == b` is an acceptable solution unless
        # `no_ident` is set.
        else conde([term_walko_step(a, b)], [eq(a, b)]),
    )
================================================
FILE: kanren/py.typed
================================================
================================================
FILE: kanren/term.py
================================================
from collections.abc import Mapping, Sequence
from cons.core import ConsError, cons
from etuples import apply as term
from etuples import rands as arguments
from etuples import rator as operator
from unification.core import _reify, _unify, construction_sentinel, reify
from unification.variable import isvar
from .core import eq, lall
from .goals import conso
def applyo(o_rator, o_rands, obj):
    """Construct a goal relating an object to the application of its (ope)rator to its (ope)rands.

    In other words, this is the relation `op(*args) == obj`.  Dispatching is
    done through the `rator`, `rands`, and `apply` functions from `etuples`,
    so implement/override those to customize the behavior.
    """  # noqa: E501

    def applyo_goal(S):
        nonlocal o_rator, o_rands, obj

        rator_rf, rands_rf, obj_rf = reify((o_rator, o_rands, obj), S)

        if not isvar(obj_rf):
            # The object is ground, so take it apart and relate the pieces
            # to the goal's rator and rands.  This goal should work with
            # *any* arguments, so simply fail when the ground operations
            # fail/err.
            try:
                obj_rator, obj_rands = operator(obj_rf), arguments(obj_rf)
            except (ConsError, NotImplementedError):
                return

            yield from lall(eq(rator_rf, obj_rator), eq(rands_rf, obj_rands))(S)
        elif isvar(rands_rf) or isvar(rator_rf):
            # The object and at least one of the rator/rands are logic
            # variables, so just assert a `cons` relationship between them.
            yield from conso(rator_rf, rands_rf, obj_rf)(S)
        else:
            # Only the object is a logic variable; state that it equals the
            # application of the (ground) rator to the (ground) rands.
            try:
                obj_applied = term(rator_rf, rands_rf)
            except (ConsError, NotImplementedError):
                return

            yield from eq(obj_rf, obj_applied)(S)

    return applyo_goal
@term.register(object, Sequence)
def term_Sequence(rator, rands):
    # Override the default `apply` dispatch so that applying a rator to a
    # sequence of rands preserves the sequence's type.
    return cons(rator, rands)
def unifiable_with_term(cls):
    """Class decorator registering `cls` for term-based reification and unification."""
    # Reify/unify instances of `cls` by taking them apart with
    # `operator`/`arguments` and rebuilding them with `term`.
    _reify.add((cls, Mapping), reify_term)
    _unify.add((cls, cls, Mapping), unify_term)
    return cls
def reify_term(obj, s):
    # Generator-based `_reify` handler: reify the term's operator and
    # arguments under `s`, then rebuild the term with `term`.
    op, args = operator(obj), arguments(obj)
    op = yield _reify(op, s)
    args = yield _reify(args, s)
    # Signal (per the `unification.core` protocol) that the next yield is
    # the constructed result
    yield construction_sentinel
    yield term(op, args)
def unify_term(u, v, s):
    # Generator-based `_unify` handler: unify the operators first and, only
    # if that succeeds, unify the arguments.
    u_op, u_args = operator(u), arguments(u)
    v_op, v_args = operator(v), arguments(v)
    s = yield _unify(u_op, v_op, s)
    if s is not False:
        s = yield _unify(u_args, v_args, s)
    yield s
================================================
FILE: kanren/util.py
================================================
from collections import namedtuple
from collections.abc import Hashable, Iterable, Mapping, MutableSet, Set
from itertools import chain
HashableForm = namedtuple("HashableForm", ["type", "data"])
class FlexibleSet(MutableSet):
    """A set that uses a list (and costly identity check) for unhashable items.

    Hashable elements are kept in a real ``set``; unhashable ones fall back
    to a ``list`` whose membership checks are linear-time ``==`` scans.
    """

    __slots__ = ("set", "list")

    def __init__(self, iterable=None):
        # Hashable items
        self.set = set()
        # Unhashable items (deduplicated via linear `==` scans)
        self.list = []

        if iterable is not None:
            for i in iterable:
                self.add(i)

    def add(self, item):
        """Add `item`, falling back to the list when it is unhashable."""
        try:
            self.set.add(item)
        except TypeError:
            # TODO: Could try `make_hashable`.
            # TODO: Use `bisect` for unhashable but orderable elements
            if item not in self.list:
                self.list.append(item)

    def discard(self, item):
        """Remove `item` if it is present; otherwise, do nothing."""
        try:
            self.remove(item)
        except KeyError:
            pass

    def clear(self):
        """Remove all items."""
        self.set.clear()
        self.list.clear()

    def pop(self):
        """Remove and return an arbitrary item; hashable items are popped first.

        Raises
        ------
        KeyError
            When the set is empty.
        """
        try:
            return self.set.pop()
        except (TypeError, KeyError):
            try:
                return self.list.pop(-1)
            except IndexError:
                raise KeyError()

    def remove(self, item):
        """Remove `item`; raise `KeyError` when it is not present."""
        try:
            self.set.remove(item)
        except (TypeError, KeyError):
            # `item` is unhashable or simply absent from `self.set`
            try:
                self.list.remove(item)
            except ValueError:
                raise KeyError()

    def copy(self):
        """Return a shallow copy (the two underlying containers are copied)."""
        res = type(self)()
        res.set = self.set.copy()
        res.list = self.list.copy()
        return res

    def __le__(self, other):
        raise NotImplementedError()

    def __ge__(self, other):
        raise NotImplementedError()

    def __iter__(self):
        return chain(self.set, self.list)

    def __contains__(self, value):
        try:
            return value in self.set or value in self.list
        except TypeError:
            # `value` is unhashable, so it could only be in `self.list`
            return value in self.list

    def __len__(self):
        return len(self.set) + len(self.list)

    def __eq__(self, other):
        if type(self) == type(other):
            return self.set == other.set and self.list == other.list
        elif isinstance(other, Set):
            # Only comparable to a plain set when there are no unhashable
            # items.  BUG FIX: also require equal sizes; previously any
            # strict superset of `self.set` erroneously compared equal
            # (e.g. `FlexibleSet([1]) == {1, 2}` was `True`).
            return (
                len(self.list) == 0
                and len(other) == len(self.set)
                and other.issuperset(self.set)
            )

        return NotImplemented

    def __repr__(self):
        return f"FlexibleSet([{', '.join(str(s) for s in self)}])"
def hashable(x):
    """Return ``True`` when `x` can be hashed."""
    try:
        hash(x)
    except TypeError:
        return False
    else:
        return True
def dicthash(d):
    """Compute a hash for a dict from the frozenset of its (key, value) pairs."""
    entries = frozenset(d.items())
    return hash(entries)
def make_hashable(x):
    """Return `x` when it's hashable; otherwise, return a hashable `HashableForm` proxy.

    Raises a `TypeError` when no hashable representation is known for `x`.
    """
    # TODO: Better as a dispatch function?
    if hashable(x):
        return x
    elif isinstance(x, slice):
        # A slice is represented by its three components
        return HashableForm(type(x), (x.start, x.stop, x.step))
    elif isinstance(x, Mapping):
        # NOTE: `Mapping` must be checked before `Iterable`
        return HashableForm(type(x), frozenset(tuple(multihash(e) for e in x.items())))
    elif isinstance(x, Iterable):
        return HashableForm(type(x), tuple(multihash(e) for e in x))
    else:
        raise TypeError(f"Hashing not covered for {x}")
def multihash(x):
    # Hash `x`, substituting a hashable stand-in (via `make_hashable`) when
    # `x` itself is unhashable.
    return hash(make_hashable(x))
def unique(seq, key=lambda x: x):
    """Yield the elements of `seq`, dropping duplicates under `key`.

    Elements whose key cannot be computed (the key function raises
    `TypeError`) or whose key is unhashable are yielded unconditionally,
    since there is no efficient way to check whether they were seen before.

    Parameters
    ----------
    seq: iterable
        The elements to filter.
    key: callable
        One-argument function computing the deduplication key (defaults to
        the identity).
    """
    seen = set()
    for item in seq:
        try:
            k = key(item)
        except TypeError:
            # Just yield it and hope for the best, since we can't efficiently
            # check if we've seen it before.
            yield item
            continue

        if not isinstance(k, Hashable):
            # Just yield it and hope for the best, since we can't efficiently
            # check if we've seen it before.
            yield item
        elif k not in seen:
            # BUG FIX: reuse the already-computed key instead of calling
            # `key(item)` a second time (wasteful, and incoherent if `key`
            # isn't deterministic).
            seen.add(k)
            yield item
def intersection(*seqs):
    """Lazily yield the items of the first sequence that appear in all the others."""
    return (x for x in seqs[0] if all(x in other for other in seqs[1:]))
def groupsizes(total, len):
    """Construct groups of length len that add up to total.

    >>> from kanren.util import groupsizes
    >>> tuple(groupsizes(4, 2))
    ((1, 3), (2, 2), (3, 1))
    """
    if len == 1:
        yield (total,)
        return

    # The first group takes between 1 and total - (len - 1) so that at
    # least one unit remains for each of the remaining groups.
    for head in range(1, total - len + 2):
        for tail in groupsizes(total - head, len - 1):
            yield (head,) + tail
def pprint(g):  # pragma: no cover
    """Pretty print a tree of goals."""
    if callable(g) and hasattr(g, "__name__"):
        return g.__name__
    elif isinstance(g, type):
        return g.__name__
    elif isinstance(g, tuple):
        return "(" + ", ".join(pprint(x) for x in g) + ")"
    else:
        return str(g)
def index(tup, ind):
    """Fancy indexing with tuples."""
    return tuple(map(tup.__getitem__, ind))
================================================
FILE: pyproject.toml
================================================
[build-system]
requires = ["setuptools>=77.0.0", "setuptools-scm[toml]"]
build-backend = "setuptools.build_meta"
[project]
name = "miniKanren"
dynamic = ['version']
requires-python = ">=3.9"
authors = [{ name = "Brandon T. Willard", email = "brandonwillard+kanren@gmail.com" }]
description = "Relational programming in Python"
readme = "README.md"
license = "BSD-3-Clause"
license-files = ["LICENSE.txt"]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = [
"cons >= 0.4.0",
"etuples >= 0.3.1",
"logical-unification >= 0.4.1",
"toolz",
]
[project.urls]
repository = "http://github.com/pythological/kanren"
[dependency-groups]
test = [
"pytest",
"sympy",
]
[tool.setuptools]
include-package-data = true
[tool.setuptools.packages.find]
include = ["kanren*"]
exclude = ["doc*", "examples*", "tests*"]
[tool.setuptools.package-data]
kanren = ["py.typed"]
[tool.setuptools_scm]
version_scheme = "guess-next-dev"
local_scheme = "dirty-tag"
================================================
FILE: pytest.ini
================================================
# content of pytest.ini
[pytest]
addopts = --doctest-modules
norecursedirs = examples
testpaths = tests
doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
================================================
FILE: release-notes
================================================
New in version 0.2
* Python 3 support
* Dictionary unification
* Use multiple dispatch to extend unify, reify, isvar
* Add convenience class decorator `unifiable` to facilitate trivial
unification of user classes
* Add term operations term, arguments, operator, also multiply dispatched
* Depend on the toolz library
* Performance degradation as a result of multiple dispatch
* Arithmetic goals
* Improved set matching performance
================================================
FILE: requirements.txt
================================================
-e ./
coveralls
pydocstyle>=3.0.0
pytest>=5.0.0
pytest-cov>=2.6.1
pytest-html>=1.20.0
pylint>=2.3.1
black>=19.3b0; platform_python_implementation!='PyPy'
diff-cover
sympy
versioneer
coverage>=5.1
pre-commit
================================================
FILE: setup.cfg
================================================
[pydocstyle]
# Ignore errors for missing docstrings.
# Ignore D202 (No blank lines allowed after function docstring)
# due to bug in black: https://github.com/ambv/black/issues/355
add-ignore = D100,D101,D102,D103,D104,D105,D106,D107,D202
convention = numpy
[tool:pytest]
python_files=test*.py
testpaths=tests
[coverage:run]
relative_files = True
omit =
kanren/_version.py
tests/*
branch = True
[coverage:report]
exclude_lines =
pragma: no cover
def __repr__
raise AssertionError
raise TypeError
return NotImplemented
raise NotImplementedError
if __name__ == .__main__.:
assert False
show_missing = 1
[isort]
profile = black
lines_after_imports = 2
lines_between_sections = 1
honor_noqa = True
skip_gitignore = True
[flake8]
max-line-length = 88
extend-ignore = E203, W503
per-file-ignores =
**/__init__.py:F401,E402,F403
[pylint]
max-line-length = 88
[pylint.messages_control]
disable = C0330, C0326
[mypy]
ignore_missing_imports = True
no_implicit_optional = True
check_untyped_defs = False
strict_equality = True
warn_redundant_casts = True
warn_unused_configs = True
warn_unused_ignores = True
warn_return_any = True
warn_no_return = False
warn_unreachable = True
show_error_codes = True
allow_redefinition = False
files = kanren,tests
================================================
FILE: tests/__init__.py
================================================
================================================
FILE: tests/test_assoccomm.py
================================================
from collections.abc import Sequence
from copy import copy
import pytest
from cons import cons
from etuples.core import etuple
from unification import isvar, reify, unify, var
from kanren.assoccomm import (
assoc_args,
assoc_flatten,
associative,
commutative,
eq_assoc,
eq_assoc_args,
eq_assoccomm,
eq_comm,
flatten_assoc_args,
)
from kanren.core import run
from kanren.facts import fact
from kanren.term import arguments, operator, term
@pytest.fixture(autouse=True)
def clear_assoccomm():
    """Snapshot and restore the global `commutative`/`associative` relations around each test."""
    old_commutative_index = copy(commutative.index)
    old_commutative_facts = copy(commutative.facts)
    old_associative_index = copy(associative.index)
    old_associative_facts = copy(associative.facts)
    try:
        yield
    finally:
        # Undo any `fact(...)` assertions made by the test
        commutative.index = old_commutative_index
        commutative.facts = old_commutative_facts
        associative.index = old_associative_index
        associative.facts = old_associative_facts
class Node(object):
def __init__(self, op, args):
self.op = op
self.args = args
def __eq__(self, other):
return (
type(self) == type(other)
and self.op == other.op
and self.args == other.args
)
def __hash__(self):
return hash((type(self), self.op, self.args))
def __str__(self):
return "%s(%s)" % (self.op.name, ", ".join(map(str, self.args)))
__repr__ = __str__
class Operator(object):
    """A bare operator type distinguished only by its name."""

    def __init__(self, name):
        self.name = name
# Shared operator instances used by the object-based tests below
Add = Operator("add")
Mul = Operator("mul")


def add(*args):
    # Convenience constructor for an addition `Node`
    return Node(Add, args)


def mul(*args):
    # Convenience constructor for a multiplication `Node`
    return Node(Mul, args)
@term.register(Operator, Sequence)
def term_Operator(op, args):
    # Applying an `Operator` to a sequence of arguments builds a `Node`
    return Node(op, args)
@arguments.register(Node)
def arguments_Node(n):
    # A `Node`'s rands are its stored arguments
    return n.args
@operator.register(Node)
def operator_Node(n):
    # A `Node`'s rator is its stored operator
    return n.op
def results(g, s=None):
    """Run goal `g` against substitution `s` (default: empty) and tuple-ify its stream."""
    state = dict() if s is None else s
    return tuple(g(state))
def test_eq_comm():
    """Exercise `eq_comm` on ground, partially ground, and fully unground arguments."""
    x, y, z = var(), var(), var()

    comm_op = "comm_op"
    fact(commutative, comm_op)

    assert run(0, True, eq_comm(1, 1)) == (True,)
    assert run(0, True, eq_comm((comm_op, 1, 2, 3), (comm_op, 1, 2, 3))) == (True,)
    assert run(0, True, eq_comm((comm_op, 3, 2, 1), (comm_op, 1, 2, 3))) == (True,)
    assert run(0, y, eq_comm((comm_op, 3, y, 1), (comm_op, 1, 2, 3))) == (2,)
    assert run(0, (x, y), eq_comm((comm_op, x, y, 1), (comm_op, 1, 2, 3))) == (
        (2, 3),
        (3, 2),
    )
    assert run(0, (x, y), eq_comm((comm_op, 2, 3, 1), (comm_op, 1, x, y))) == (
        (2, 3),
        (3, 2),
    )

    assert not run(
        0, True, eq_comm(("op", 3, 2, 1), ("op", 1, 2, 3))
    )  # not commutative
    assert not run(0, True, eq_comm((3, comm_op, 2, 1), (comm_op, 1, 2, 3)))
    assert not run(0, True, eq_comm((comm_op, 1, 2, 1), (comm_op, 1, 2, 3)))
    assert not run(0, True, eq_comm(("op", 1, 2, 3), (comm_op, 1, 2, 3)))

    # Test for variable args
    res = run(4, (x, y), eq_comm(x, y))
    exp_res_form = (
        (etuple(comm_op, x, y), etuple(comm_op, y, x)),
        (x, y),
        (etuple(etuple(comm_op, x, y)), etuple(etuple(comm_op, y, x))),
        (etuple(comm_op, x, y, z), etuple(comm_op, x, z, y)),
    )

    # Each result should unify with its expected form, with the query
    # variables left unground
    for a, b in zip(res, exp_res_form):
        s = unify(a, b)
        assert s is not False
        assert all(isvar(i) for i in reify((x, y, z), s))

    # Make sure it can unify single elements
    assert (3,) == run(0, x, eq_comm((comm_op, 1, 2, 3), (comm_op, 2, x, 1)))

    # `eq_comm` should propagate through
    assert (3,) == run(
        0, x, eq_comm(("div", 1, (comm_op, 1, 2, 3)), ("div", 1, (comm_op, 2, x, 1)))
    )
    # Now it should not
    assert () == run(
        0, x, eq_comm(("div", 1, ("div", 1, 2, 3)), ("div", 1, ("div", 2, x, 1)))
    )

    expected_res = {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}
    assert expected_res == set(
        run(0, (x, y, z), eq_comm((comm_op, 1, 2, 3), (comm_op, x, y, z)))
    )
    assert expected_res == set(
        run(0, (x, y, z), eq_comm((comm_op, x, y, z), (comm_op, 1, 2, 3)))
    )
    assert expected_res == set(
        run(
            0,
            (x, y, z),
            eq_comm(("div", 1, (comm_op, 1, 2, 3)), ("div", 1, (comm_op, x, y, z))),
        )
    )

    e1 = (comm_op, (comm_op, 1, x), y)
    e2 = (comm_op, 2, (comm_op, 3, 1))
    assert run(0, (x, y), eq_comm(e1, e2)) == ((3, 2),)

    e1 = ((comm_op, 3, 1),)
    e2 = ((comm_op, 1, x),)
    assert run(0, x, eq_comm(e1, e2)) == (3,)

    e1 = (2, (comm_op, 3, 1))
    e2 = (y, (comm_op, 1, x))
    assert run(0, (x, y), eq_comm(e1, e2)) == ((3, 2),)

    e1 = (comm_op, (comm_op, 1, x), y)
    e2 = (comm_op, 2, (comm_op, 3, 1))
    assert run(0, (x, y), eq_comm(e1, e2)) == ((3, 2),)
@pytest.mark.xfail(reason="`applyo`/`buildo` needs to be a constraint.", strict=True)
def test_eq_comm_object():
    """`eq_comm` over user-defined `Node` objects (expected to fail for now)."""
    x = var("x")

    fact(commutative, Add)
    fact(associative, Add)

    assert run(0, x, eq_comm(add(1, 2, 3), add(3, 1, x))) == (2,)
    assert set(run(0, x, eq_comm(add(1, 2), x))) == set((add(1, 2), add(2, 1)))
    assert set(run(0, x, eq_assoccomm(add(1, 2, 3), add(1, x)))) == set(
        (add(2, 3), add(3, 2))
    )
def test_flatten_assoc_args():
    """`flatten_assoc_args` should flatten only nested applications of the given operator."""
    op = "add"

    def op_pred(x):
        return x == op

    assert list(flatten_assoc_args(op_pred, [op, 1, 2, 3, 4])) == [op, 1, 2, 3, 4]
    assert list(flatten_assoc_args(op_pred, [op, 1, 2, [op]])) == [op, 1, 2, [op]]
    assert list(flatten_assoc_args(op_pred, [[op, 1, 2, [op]]])) == [1, 2, [op]]

    res = list(
        flatten_assoc_args(
            op_pred, [[1, 2, op], 3, [op, 4, [op, [op]]], [op, 5], 6, op, 7]
        )
    )
    exp_res = [[1, 2, op], 3, 4, [op], 5, 6, op, 7]
    assert res == exp_res
def test_assoc_args():
    """`assoc_args` should enumerate the `n`-sized re-groupings of an argument sequence."""
    op = "add"

    def op_pred(x):
        return x == op

    assert tuple(assoc_args(op, (1, 2, 3), 2)) == (
        ((op, 1, 2), 3),
        (1, (op, 2, 3)),
    )
    # The input collection type should be preserved
    assert tuple(assoc_args(op, [1, 2, 3], 2)) == (
        [[op, 1, 2], 3],
        [1, [op, 2, 3]],
    )
    assert tuple(assoc_args(op, (1, 2, 3), 1)) == (
        ((op, 1), 2, 3),
        (1, (op, 2), 3),
        (1, 2, (op, 3)),
    )
    assert tuple(assoc_args(op, (1, 2, 3), 3)) == ((1, 2, 3),)

    f_rands = flatten_assoc_args(op_pred, (1, (op, 2, 3)))
    assert tuple(assoc_args(op, f_rands, 2, ctor=tuple)) == (
        ((op, 1, 2), 3),
        (1, (op, 2, 3)),
    )
def test_eq_assoc_args():
    """Check `eq_assoc_args` for ground/unground arguments and the `n`/`no_ident` options."""
    assoc_op = "assoc_op"

    fact(associative, assoc_op)

    assert not run(0, True, eq_assoc_args(assoc_op, (1,), [1], n=None))
    assert run(0, True, eq_assoc_args(assoc_op, (1,), (1,), n=None)) == (True,)
    assert run(0, True, eq_assoc_args(assoc_op, (1, 1), (1, 1))) == (True,)
    assert run(0, True, eq_assoc_args(assoc_op, (1, 2, 3), (1, (assoc_op, 2, 3)))) == (
        True,
    )
    assert run(0, True, eq_assoc_args(assoc_op, (1, (assoc_op, 2, 3)), (1, 2, 3))) == (
        True,
    )
    assert run(
        0, True, eq_assoc_args(assoc_op, (1, (assoc_op, 2, 3), 4), (1, 2, 3, 4))
    ) == (True,)
    assert not run(
        0, True, eq_assoc_args(assoc_op, (1, 2, 3), (1, (assoc_op, 2, 3), 4))
    )

    x, y = var(), var()

    assert run(0, True, eq_assoc_args(assoc_op, (x,), (x,), n=None)) == (True,)
    assert run(0, x, eq_assoc_args(assoc_op, x, (y,), n=None)) == ((y,),)
    assert run(0, x, eq_assoc_args(assoc_op, (y,), x, n=None)) == ((y,),)

    assert run(0, x, eq_assoc_args(assoc_op, (1, x, 4), (1, 2, 3, 4))) == (
        (assoc_op, 2, 3),
    )
    assert run(0, x, eq_assoc_args(assoc_op, (1, 2, 3, 4), (1, x, 4))) == (
        (assoc_op, 2, 3),
    )
    # The input collection type should be preserved
    assert run(0, x, eq_assoc_args(assoc_op, [1, x, 4], [1, 2, 3, 4])) == (
        [assoc_op, 2, 3],
    )
    assert run(0, True, eq_assoc_args(assoc_op, (1, 1), ("other_op", 1, 1))) == ()

    assert run(0, x, eq_assoc_args(assoc_op, (1, 2, 3), x, n=2)) == (
        ((assoc_op, 1, 2), 3),
        (1, (assoc_op, 2, 3)),
    )
    assert run(0, x, eq_assoc_args(assoc_op, x, (1, 2, 3), n=2)) == (
        ((assoc_op, 1, 2), 3),
        (1, (assoc_op, 2, 3)),
    )
    assert run(0, x, eq_assoc_args(assoc_op, (1, 2, 3), x)) == (
        ((assoc_op, 1, 2), 3),
        (1, (assoc_op, 2, 3)),
        (1, 2, 3),
    )

    # `no_ident=True` should exclude the identity solutions
    assert () not in run(0, x, eq_assoc_args(assoc_op, (), x, no_ident=True))
    assert (1,) not in run(0, x, eq_assoc_args(assoc_op, (1,), x, no_ident=True))
    assert (1, 2, 3) not in run(
        0, x, eq_assoc_args(assoc_op, (1, 2, 3), x, no_ident=True)
    )

    assert (
        run(
            0,
            True,
            eq_assoc_args(
                assoc_op,
                (1, (assoc_op, 2, 3)),
                (1, (assoc_op, 2, 3)),
                no_ident=True,
            ),
        )
        == ()
    )

    assert run(
        0,
        True,
        eq_assoc_args(
            assoc_op,
            (1, (assoc_op, 2, 3)),
            ((assoc_op, 1, 2), 3),
            no_ident=True,
        ),
    ) == (True,)
def test_eq_assoc():
    """Check `eq_assoc` on ground, partially ground, and fully unground arguments."""
    assoc_op = "assoc_op"

    fact(associative, assoc_op)

    assert run(0, True, eq_assoc(1, 1)) == (True,)
    assert run(0, True, eq_assoc((assoc_op, 1, 2, 3), (assoc_op, 1, 2, 3))) == (True,)
    assert not run(0, True, eq_assoc((assoc_op, 3, 2, 1), (assoc_op, 1, 2, 3)))
    assert run(
        0, True, eq_assoc((assoc_op, (assoc_op, 1, 2), 3), (assoc_op, 1, 2, 3))
    ) == (True,)
    assert run(
        0, True, eq_assoc((assoc_op, 1, 2, 3), (assoc_op, (assoc_op, 1, 2), 3))
    ) == (True,)
    # An operator not declared associative should not re-group
    o = "op"
    assert not run(0, True, eq_assoc((o, 1, 2, 3), (o, (o, 1, 2), 3)))

    x = var()
    res = run(0, x, eq_assoc((assoc_op, 1, 2, 3), x, n=2))
    assert res == (
        (assoc_op, (assoc_op, 1, 2), 3),
        (assoc_op, 1, 2, 3),
        (assoc_op, 1, (assoc_op, 2, 3)),
    )
    res = run(0, x, eq_assoc(x, (assoc_op, 1, 2, 3), n=2))
    assert res == (
        (assoc_op, (assoc_op, 1, 2), 3),
        (assoc_op, 1, 2, 3),
        (assoc_op, 1, (assoc_op, 2, 3)),
    )

    y, z = var(), var()

    # Check results when both arguments are variables
    res = run(3, (x, y), eq_assoc(x, y))
    exp_res_form = (
        (etuple(assoc_op, x, y, z), etuple(assoc_op, etuple(assoc_op, x, y), z)),
        (x, y),
        (
            etuple(etuple(assoc_op, x, y, z)),
            etuple(etuple(assoc_op, etuple(assoc_op, x, y), z)),
        ),
    )
    for a, b in zip(res, exp_res_form):
        s = unify(a, b)
        assert s is not False, (a, b)
        assert all(isvar(i) for i in reify((x, y, z), s))

    # Make sure it works with `cons`
    res = run(0, (x, y), eq_assoc(cons(x, y), (assoc_op, 1, 2, 3)))
    assert res == (
        (assoc_op, ((assoc_op, 1, 2), 3)),
        (assoc_op, (1, 2, 3)),
        (assoc_op, (1, (assoc_op, 2, 3))),
    )
    res = run(1, (x, y), eq_assoc(cons(x, y), (x, z, 2, 3)))
    assert res == ((assoc_op, ((assoc_op, z, 2), 3)),)

    # Don't use a predicate that can never succeed, e.g.
    # associative_2 = Relation("associative_2")
    # run(1, (x, y), eq_assoc(cons(x, y), (x, z), op_predicate=associative_2))

    # Nested expressions should work now
    expr1 = (assoc_op, 1, 2, (assoc_op, x, 5, 6))
    expr2 = (assoc_op, (assoc_op, 1, 2), 3, 4, 5, 6)
    assert run(0, x, eq_assoc(expr1, expr2, n=2)) == ((assoc_op, 3, 4),)
def test_assoc_flatten():
    """`assoc_flatten` should flatten nested applications of known-associative operators only."""
    add = "add"
    mul = "mul"

    fact(commutative, add)
    fact(associative, add)
    fact(commutative, mul)
    fact(associative, mul)

    assert run(
        0,
        True,
        assoc_flatten((mul, 1, (add, 2, 3), (mul, 4, 5)), (mul, 1, (add, 2, 3), 4, 5)),
    ) == (True,)

    x = var()
    assert run(
        0,
        x,
        assoc_flatten((mul, 1, (add, 2, 3), (mul, 4, 5)), x),
    ) == ((mul, 1, (add, 2, 3), 4, 5),)

    # An unregistered operator should be left untouched
    assert run(
        0,
        True,
        assoc_flatten(
            ("op", 1, (add, 2, 3), (mul, 4, 5)), ("op", 1, (add, 2, 3), (mul, 4, 5))
        ),
    ) == (True,)

    assert run(0, x, assoc_flatten(("op", 1, (add, 2, 3), (mul, 4, 5)), x)) == (
        ("op", 1, (add, 2, 3), (mul, 4, 5)),
    )
def test_eq_assoccomm():
    """Check the combined associative/commutative equality relation."""
    x, y = var(), var()

    ac = "commassoc_op"

    fact(commutative, ac)
    fact(associative, ac)

    assert run(0, True, eq_assoccomm(1, 1)) == (True,)
    assert run(0, True, eq_assoccomm((1,), (1,))) == (True,)
    assert run(0, True, eq_assoccomm(x, (1,))) == (True,)
    assert run(0, True, eq_assoccomm((1,), x)) == (True,)

    # Assoc only
    assert run(0, True, eq_assoccomm((ac, 1, (ac, 2, 3)), (ac, (ac, 1, 2), 3))) == (
        True,
    )
    # Commute only
    assert run(0, True, eq_assoccomm((ac, 1, (ac, 2, 3)), (ac, (ac, 3, 2), 1))) == (
        True,
    )
    # Both
    assert run(0, True, eq_assoccomm((ac, 1, (ac, 3, 2)), (ac, (ac, 1, 2), 3))) == (
        True,
    )

    # All associative/commutative rearrangements of `(ac, 1, 2, 3)`
    exp_res = set(
        (
            (ac, 1, 3, 2),
            (ac, 1, 2, 3),
            (ac, 2, 1, 3),
            (ac, 2, 3, 1),
            (ac, 3, 1, 2),
            (ac, 3, 2, 1),
            (ac, 1, (ac, 2, 3)),
            (ac, 1, (ac, 3, 2)),
            (ac, 2, (ac, 1, 3)),
            (ac, 2, (ac, 3, 1)),
            (ac, 3, (ac, 1, 2)),
            (ac, 3, (ac, 2, 1)),
            (ac, (ac, 2, 3), 1),
            (ac, (ac, 3, 2), 1),
            (ac, (ac, 1, 3), 2),
            (ac, (ac, 3, 1), 2),
            (ac, (ac, 1, 2), 3),
            (ac, (ac, 2, 1), 3),
        )
    )
    assert set(run(0, x, eq_assoccomm((ac, 1, (ac, 2, 3)), x))) == exp_res
    assert set(run(0, x, eq_assoccomm((ac, 1, 3, 2), x))) == exp_res
    assert set(run(0, x, eq_assoccomm((ac, 2, (ac, 3, 1)), x))) == exp_res
    # LHS variations
    assert set(run(0, x, eq_assoccomm(x, (ac, 1, (ac, 2, 3))))) == exp_res

    assert run(0, (x, y), eq_assoccomm((ac, (ac, 1, x), y), (ac, 2, (ac, 3, 1)))) == (
        (2, 3),
        (3, 2),
    )

    assert run(0, True, eq_assoccomm((ac, (ac, 1, 2), 3), (ac, 1, 2, 3))) == (True,)
    assert run(0, True, eq_assoccomm((ac, 3, (ac, 1, 2)), (ac, 1, 2, 3))) == (True,)
    assert run(0, True, eq_assoccomm((ac, 1, 1), ("other_op", 1, 1))) == ()

    assert run(0, x, eq_assoccomm((ac, 3, (ac, 1, 2)), (ac, 1, x, 3))) == (2,)

    # Both arguments unground
    op_lv = var()
    z = var()
    res = run(4, (x, y), eq_assoccomm(x, y))
    exp_res_form = (
        (etuple(op_lv, x, y), etuple(op_lv, y, x)),
        (y, y),
        (
            etuple(etuple(op_lv, x, y)),
            etuple(etuple(op_lv, y, x)),
        ),
        (
            etuple(op_lv, x, y, z),
            etuple(op_lv, etuple(op_lv, x, y), z),
        ),
    )

    for a, b in zip(res, exp_res_form):
        s = unify(a, b)
        # Any ground operator in a result must be a known assoc/comm operator
        assert (
            op_lv not in s
            or (s[op_lv],) in associative.facts
            or (s[op_lv],) in commutative.facts
        )
        assert s is not False, (a, b)
        assert all(isvar(i) for i in reify((x, y, z), s))
def test_assoccomm_algebra():
    """Match an algebraic pattern against an expression up to assoc/comm rearrangement."""
    add = "add"
    mul = "mul"

    fact(commutative, add)
    fact(associative, add)
    fact(commutative, mul)
    fact(associative, mul)

    x, y = var(), var()

    pattern = (mul, (add, 1, x), y)  # (1 + x) * y
    expr = (mul, 2, (add, 3, 1))  # 2 * (3 + 1)

    assert run(0, (x, y), eq_assoccomm(pattern, expr)) == ((3, 2),)
def test_assoccomm_objects():
    """`eq_assoccomm` should also work over user-defined `Node` terms."""
    fact(commutative, Add)
    fact(associative, Add)

    x = var()

    assert run(0, True, eq_assoccomm(add(1, 2, 3), add(3, 1, 2))) == (True,)
    assert run(0, x, eq_assoccomm(add(1, 2, 3), add(1, 2, x))) == (3,)
    assert run(0, x, eq_assoccomm(add(1, 2, 3), add(x, 2, 1))) == (3,)
================================================
FILE: tests/test_constraints.py
================================================
from itertools import permutations
from cons import cons
from pytest import raises
from unification import reify, unify, var
from unification.core import _reify, stream_eval
from kanren import conde, eq, run
from kanren.constraints import (
ConstrainedState,
ConstrainedVar,
DisequalityStore,
isinstanceo,
neq,
typeo,
)
from kanren.core import lconj
from kanren.goals import membero
def test_ConstrainedState():
    """`ConstrainedState` should behave like a substitution map and deep-copy its stores."""
    a_lv, b_lv = var(), var()

    ks = ConstrainedState()

    assert repr(ks) == "ConstrainedState({}, {})"

    # An empty state equals an empty dict
    assert ks == {}
    assert {} == ks
    assert not ks == {a_lv: 1}
    assert not ks == ConstrainedState({a_lv: 1})

    assert unify(1, 1, ks) is not None
    assert unify(1, 2, ks) is False

    assert unify(b_lv, a_lv, ks)
    assert unify(a_lv, b_lv, ks)
    assert unify(a_lv, b_lv, ks)

    # Now, try that with a constraint (that's never used).
    ks.constraints[DisequalityStore] = DisequalityStore({a_lv: {1}})

    assert not ks == {a_lv: 1}
    assert not ks == ConstrainedState({a_lv: 1})

    assert unify(1, 1, ks) is not None
    assert unify(1, 2, ks) is False

    assert unify(b_lv, a_lv, ks)
    assert unify(a_lv, b_lv, ks)
    assert unify(a_lv, b_lv, ks)

    ks = ConstrainedState(
        {a_lv: 1}, constraints={DisequalityStore: DisequalityStore({b_lv: {1}})}
    )
    ks_2 = ks.copy()

    # `copy` must produce independent constraint stores (deep enough that
    # mutating one state's store can't affect the other)
    assert ks == ks_2
    assert ks is not ks_2
    assert ks.constraints is not ks_2.constraints
    assert ks.constraints[DisequalityStore] is not ks_2.constraints[DisequalityStore]
    assert (
        ks.constraints[DisequalityStore].lvar_constraints[b_lv]
        == ks_2.constraints[DisequalityStore].lvar_constraints[b_lv]
    )
    assert (
        ks.constraints[DisequalityStore].lvar_constraints[b_lv]
        is not ks_2.constraints[DisequalityStore].lvar_constraints[b_lv]
    )
def test_reify():
    """Reifying a constrained variable should produce a `ConstrainedVar` with a readable repr."""
    var_a = var("a")

    ks = ConstrainedState()

    assert repr(ConstrainedVar(var_a, ks)) == "~a: {}"

    de = DisequalityStore({var_a: {1, 2}})
    ks.constraints[DisequalityStore] = de

    assert repr(de) == "ConstraintStore(neq: {~a: {1, 2}})"
    # A variable with no constraints has no constraint string
    assert de.constraints_str(var()) == ""

    assert repr(ConstrainedVar(var_a, ks)) == "~a: {neq {1, 2}}"

    # TODO: Make this work with `reify` when `var('a')` isn't in `ks`.
    assert isinstance(reify(var_a, ks), ConstrainedVar)
    assert repr(stream_eval(_reify(var_a, ks))) == "~a: {neq {1, 2}}"
def test_ConstraintStore():
    """`DisequalityStore` equality and membership should depend on its variable constraints."""
    a_lv, b_lv = var(), var()
    assert DisequalityStore({a_lv: {1}}) == DisequalityStore({a_lv: {1}})
    assert DisequalityStore({a_lv: {1}}) != DisequalityStore({a_lv: {1}, b_lv: {}})

    assert a_lv in DisequalityStore({a_lv: {1}})
def test_ConstrainedVar():
    """A `ConstrainedVar` should compare and hash like the variable it wraps."""
    a_lv = var()
    a_clv = ConstrainedVar(a_lv, ConstrainedState())
    assert a_lv == a_clv
    assert a_clv == a_lv
    assert hash(a_lv) == hash(a_clv)
    # Interchangeable as set members, too
    assert a_lv in {a_clv}
    assert a_clv in {a_lv}
def test_disequality_basic():
    """Basic `neq` behavior: store updates, substitution interaction, and failure cases."""
    a_lv, b_lv = var(), var()

    ks = ConstrainedState()
    de = DisequalityStore({a_lv: {1}})
    ks.constraints[DisequalityStore] = de

    assert unify(a_lv, 1, ks) is False
    # The constraint should follow the variable through aliasing
    ks = unify(a_lv, b_lv, ks)
    assert unify(b_lv, 1, ks) is False

    res = list(lconj(neq({}, 1))({}))
    assert len(res) == 1

    res = list(lconj(neq(1, {}))({}))
    assert len(res) == 1

    # Disequality between equal ground terms fails outright
    res = list(lconj(neq({}, {}))({}))
    assert len(res) == 0

    res = list(lconj(neq(a_lv, 1))({}))
    assert len(res) == 1
    assert isinstance(res[0], ConstrainedState)
    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1}

    res = list(lconj(neq(1, a_lv))({}))
    assert len(res) == 1
    assert isinstance(res[0], ConstrainedState)
    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1}

    # Repeated constraints are merged into one entry
    res = list(lconj(neq(a_lv, 1), neq(a_lv, 2), neq(a_lv, 1))({}))
    assert len(res) == 1
    assert isinstance(res[0], ConstrainedState)
    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1, 2}

    res = list(lconj(neq(a_lv, 1), eq(a_lv, 2))({}))
    assert len(res) == 1
    assert isinstance(res[0], ConstrainedState)
    # The constrained variable is already ground and satisfies the constraint,
    # so it should've been removed from the store
    assert a_lv not in res[0].constraints[DisequalityStore].lvar_constraints
    assert res[0][a_lv] == 2

    res = list(lconj(eq(a_lv, 1), neq(a_lv, 1))({}))
    assert res == []
def test_disequality():
    """`neq` goal sets should yield the expected result counts in any goal order."""
    a_lv, b_lv = var(), var()
    q_lv, c_lv = var(), var()

    # Pairs of (goals, expected number of results)
    goal_sets = [
        ([neq(a_lv, 1)], 1),
        ([neq(cons(1, a_lv), [1]), eq(a_lv, [])], 0),
        ([neq(cons(1, a_lv), [1]), eq(a_lv, b_lv), eq(b_lv, [])], 0),
        ([neq([1], cons(1, a_lv)), eq(a_lv, b_lv), eq(b_lv, [])], 0),
        # TODO FIXME: This one won't work due to an ambiguity in `cons`.
        # (
        #     [
        #         neq([1], cons(1, a_lv)),
        #         eq(a_lv, b_lv),
        #         # Both make `cons` produce a list
        #         conde([eq(b_lv, None)], [eq(b_lv, [])]),
        #     ],
        #     0,
        # ),
        ([neq(cons(1, a_lv), [1]), eq(a_lv, b_lv), eq(b_lv, tuple())], 1),
        ([neq([1], cons(1, a_lv)), eq(a_lv, b_lv), eq(b_lv, tuple())], 1),
        (
            [
                neq([1], cons(1, a_lv)),
                eq(a_lv, b_lv),
                # The first should fail, the second should succeed
                conde([eq(b_lv, [])], [eq(b_lv, tuple())]),
            ],
            1,
        ),
        ([neq(a_lv, 1), eq(a_lv, 1)], 0),
        ([neq(a_lv, 1), eq(b_lv, 1), eq(a_lv, b_lv)], 0),
        ([neq(a_lv, 1), eq(b_lv, 1), eq(a_lv, b_lv)], 0),
        ([neq(a_lv, b_lv), eq(b_lv, c_lv), eq(c_lv, a_lv)], 0),
    ]

    for i, (goal, num_results) in enumerate(goal_sets):
        # The order of goals should not matter, so try them all
        for goal_ord in permutations(goal):
            res = list(lconj(*goal_ord)({}))
            assert len(res) == num_results, (i, goal_ord)

            res = list(lconj(*goal_ord)(ConstrainedState()))
            assert len(res) == num_results, (i, goal_ord)

            assert len(run(0, q_lv, *goal_ord)) == num_results, (i, goal_ord)
def test_typeo_basic():
    """Basic `typeo` behavior on ground and unground instance/type arguments."""
    a_lv, q_lv = var(), var()

    assert run(0, q_lv, typeo(q_lv, int)) == (q_lv,)
    assert run(0, q_lv, typeo(1, int)) == (q_lv,)
    assert run(0, q_lv, typeo(1, str)) == ()
    assert run(0, q_lv, typeo("hi", str)) == (q_lv,)
    assert run(0, q_lv, typeo([], q_lv)) == (q_lv,)

    # Invalid second arg type (i.e. not a type)
    assert run(0, q_lv, typeo(1, 1)) == ()

    assert run(0, q_lv, membero(q_lv, (1, "cat", 2.2, "hat")), typeo(q_lv, str)) == (
        "cat",
        "hat",
    )

    # Conflicting type constraints on the same variable should raise
    with raises(ValueError):
        run(0, q_lv, typeo(a_lv, str), typeo(a_lv, int))
def test_typeo():
    """Check `typeo` across goal orderings, `conde` branches, and chained unifications."""
    a_lv, b_lv, q_lv = var(), var(), var()

    goal_sets = [
        # Logic variable instance type that's immediately ground in another
        # goal
        ([typeo(q_lv, int), eq(q_lv, 1)], (1,)),
        # Use an unhashable constrained term
        ([typeo(q_lv, list), eq(q_lv, [])], ([],)),
        # TODO: A constraint parameter that is never ground
        # ([typeo(a_lv, q_lv), eq(a_lv, 1)], (int,)),
        # A non-ground, non-logic variable instance argument that changes type
        # when ground
        ([typeo(cons(1, a_lv), list), eq(a_lv, [])], (q_lv,)),
        # Logic variable instance and type arguments
        ([typeo(q_lv, int), eq(b_lv, 1), eq(b_lv, q_lv)], (1,)),
        # The same, but with `conde`
        (
            [
                typeo(q_lv, int),
                # One succeeds, one fails
                conde([eq(b_lv, 1)], [eq(b_lv, "hi")]),
                eq(b_lv, q_lv),
            ],
            (1,),
        ),
        # Logic variable instance argument that's eventually grounded to a
        # mismatched instance type through another logic variable
        ([typeo(q_lv, int), eq(b_lv, 1.0), eq(b_lv, q_lv)], ()),
        # Logic variable type argument that's eventually grounded to a
        # mismatched instance type through another logic variable (i.e. both
        # arguments are ground to `int` types)
        ([typeo(q_lv, b_lv), eq(b_lv, int), eq(b_lv, q_lv)], ()),
        # Logic variable type argument that's eventually grounded to a
        # mismatched instance type through another logic variable (i.e. both
        # arguments are ground to the value `1`, which violates the second
        # argument type expectations)
        ([typeo(q_lv, b_lv), eq(b_lv, 1), eq(b_lv, q_lv)], ()),
        # Check a term that's unground but ground enough for this constraint
        ([typeo(a_lv, tuple), eq([(b_lv,)], a_lv)], ()),
    ]

    for i, (goal, expected) in enumerate(goal_sets):
        # Results should not depend on the goal ordering.
        for goal_ord in permutations(goal):
            res = run(0, q_lv, *goal_ord)
            assert res == expected, (i, goal_ord)
def test_instanceo_basic():
    """Exercise `isinstanceo` with ground arguments."""
    q_lv = var()

    assert run(0, q_lv, isinstanceo(q_lv, int)) == (q_lv,)
    assert run(0, q_lv, isinstanceo(1, int)) == (q_lv,)
    assert run(0, q_lv, isinstanceo(1, object)) == (q_lv,)
    # NOTE: Not currently supported.
    # assert run(0, q_lv, isinstanceo(1, (int, object))) == (q_lv,)

    assert run(0, q_lv, isinstanceo(1, str)) == ()
    # NOTE: Not currently supported.
    # assert run(0, q_lv, isinstanceo(1, (str, list))) == ()

    assert run(0, q_lv, isinstanceo("hi", str)) == (q_lv,)

    # Invalid second arg type (i.e. not a type)
    assert run(0, q_lv, isinstanceo(1, 1)) == ()
def test_instanceo():
    """Check `isinstanceo` across goal orderings, `conde` branches, and chained unifications."""
    b_lv, q_lv = var(), var()

    goal_sets = [
        # Logic variable instance type that's immediately ground in another
        # goal
        ([isinstanceo(q_lv, list), eq(q_lv, [])], ([],)),
        # Logic variable in the type argument that's eventually unified with
        # a valid type for the given instance argument
        ([isinstanceo([], q_lv), eq(q_lv, list)], (list,)),
        # Logic variable type argument that's eventually reified to a tuple
        # containing a valid type for the instance argument
        # NOTE: Not currently supported.
        # (
        #     [isinstanceo([], q_lv), eq(q_lv, (int, b_lv)), eq(b_lv, list)],
        #     ((int, list),),
        # ),
        # A non-ground, non-logic variable instance argument that changes type
        # when ground
        ([isinstanceo(cons(1, q_lv), list), eq(q_lv, [])], ([],)),
        # Logic variable instance argument that's eventually grounded through
        # another logic variable
        ([isinstanceo(q_lv, int), eq(b_lv, 1), eq(b_lv, q_lv)], (1,)),
        # The same, but with `conde`
        (
            [
                isinstanceo(q_lv, int),
                # One succeeds, one fails
                conde([eq(b_lv, 1)], [eq(b_lv, "hi")]),
                eq(b_lv, q_lv),
            ],
            (1,),
        ),
        # Logic variable instance argument that's eventually grounded to a
        # mismatched instance type through another logic variable
        ([isinstanceo(q_lv, int), eq(b_lv, 1.0), eq(b_lv, q_lv)], ()),
        # Logic variable type argument that's eventually grounded to a
        # mismatched instance type through another logic variable (i.e. both
        # arguments are ground to `int` types)
        ([isinstanceo(q_lv, b_lv), eq(b_lv, int), eq(b_lv, q_lv)], ()),
        # Logic variable type argument that's eventually grounded to a
        # mismatched instance type through another logic variable (i.e. both
        # arguments are ground to the value `1`, which violates the second
        # argument type expectations)
        ([isinstanceo(q_lv, b_lv), eq(b_lv, 1), eq(b_lv, q_lv)], ()),
        # Check a term that's unground but ground enough for this constraint
        ([isinstanceo(q_lv, tuple), eq([(b_lv,)], q_lv)], ()),
    ]

    for i, (goal, expected) in enumerate(goal_sets):
        # Results should not depend on the goal ordering.
        for goal_ord in permutations(goal):
            res = run(0, q_lv, *goal_ord)
            assert res == expected, (i, goal_ord)
================================================
FILE: tests/test_core.py
================================================
from collections.abc import Iterator
from itertools import count
from cons import cons
from pytest import raises
from unification import var
from kanren.core import (
conde,
eq,
fail,
ground_order,
ifa,
lall,
lany,
lconj,
lconj_seq,
ldisj,
ldisj_seq,
run,
succeed,
)
def results(g, s=None):
    """Apply the goal ``g`` to the substitution ``s`` (default: empty) and
    return every resulting state as a tuple."""
    state = {} if s is None else s
    return tuple(g(state))
def test_eq():
    """`eq` binds an unbound variable and fails on a conflicting binding."""
    v = var()
    assert tuple(eq(v, 2)({})) == ({v: 2},)
    assert tuple(eq(v, 2)({v: 3})) == ()
def test_lconj_basics():
    """Check conjunction (`lconj`/`lconj_seq`) over goals and goal generators."""
    a, b = var(), var()
    res = list(lconj(eq(1, a), eq(2, b))({}))
    assert res == [{a: 1, b: 2}]

    res = list(lconj(eq(1, a))({}))
    assert res == [{a: 1}]

    # An empty conjunction trivially succeeds.
    res = list(lconj_seq([])({}))
    assert res == [{}]

    # Conflicting bindings make the conjunction fail.
    res = list(lconj(eq(1, a), eq(2, a))({}))
    assert res == []

    res = list(lconj(eq(1, 2))({}))
    assert res == []

    res = list(lconj(eq(1, 1))({}))
    assert res == [{}]

    def gen():
        for i in [succeed, succeed]:
            yield i

    # Goals can also be supplied as a single generator argument.
    res = list(lconj(gen())({}))
    assert res == [{}]

    def gen():
        return

    # NOTE(review): this `gen` has no `yield`, so `gen()` returns `None`;
    # a null entry in the sequence produces no results.
    res = list(lconj_seq([gen()])({}))
    assert res == []
def test_ldisj_basics():
    """Check disjunction (`ldisj`/`ldisj_seq`) over goals and goal generators."""
    a = var()
    res = list(ldisj(eq(1, a))({}))
    assert res == [{a: 1}]

    res = list(ldisj(eq(1, 2))({}))
    assert res == []

    res = list(ldisj(eq(1, 1))({}))
    assert res == [{}]

    # Each succeeding disjunct contributes its own result.
    res = list(ldisj(eq(1, a), eq(1, a))({}))
    assert res == [{a: 1}, {a: 1}]

    res = list(ldisj(eq(1, a), eq(2, a))({}))
    assert res == [{a: 1}, {a: 2}]

    # An empty disjunction succeeds once.
    res = list(ldisj_seq([])({}))
    assert res == [{}]

    def gen():
        for i in [succeed, succeed]:
            yield i

    # Goals can also be supplied as a single generator argument.
    res = list(ldisj(gen())({}))
    assert res == [{}, {}]
def test_conde_basics():
    """Check `conde` result ordering, including nested `conde`s."""
    a, b = var(), var()
    res = list(conde([eq(1, a), eq(2, b)], [eq(1, b), eq(2, a)])({}))
    assert res == [{a: 1, b: 2}, {b: 1, a: 2}]

    # A failing goal removes its branch's results entirely.
    res = list(conde([eq(1, a), eq(2, 1)], [eq(1, b), eq(2, a)])({}))
    assert res == [{b: 1, a: 2}]

    aa, ab, ba, bb, bc = var(), var(), var(), var(), var()
    res = list(
        conde(
            [eq(1, a), conde([eq(11, aa)], [eq(12, ab)])],
            [
                eq(1, b),
                conde([eq(111, ba), eq(112, bb)], [eq(121, bc)]),
            ],
        )({})
    )
    # Results from the nested `conde`s interleave across the outer branches.
    assert res == [
        {a: 1, aa: 11},
        {b: 1, ba: 111, bb: 112},
        {a: 1, ab: 12},
        {b: 1, bc: 121},
    ]

    res = list(conde([eq(1, 2)], [eq(1, 1)])({}))
    assert res == [{}]

    assert list(lconj(eq(1, 1))({})) == [{}]

    # `conde` goals compose with `lconj`.
    res = list(lconj(conde([eq(1, 2)], [eq(1, 1)]))({}))
    assert res == [{}]

    res = list(lconj(conde([eq(1, 2)], [eq(1, 1)]), conde([eq(1, 2)], [eq(1, 1)]))({}))
    assert res == [{}]
def test_lany():
    """Each succeeding disjunct of `lany` contributes one result."""
    q = var()
    # Build the goal fresh each time to confirm repeatable behavior.
    for _ in range(2):
        assert len(tuple(lany(eq(q, 2), eq(q, 3))({}))) == 2
def test_lall():
    """`lall` conjoins goals; the empty conjunction always succeeds."""
    v = var()
    assert results(lall(eq(v, 2))) == ({v: 2},)
    assert results(lall(eq(v, 2), eq(v, 3))) == ()
    # With no goals, `lall` succeeds without binding anything.
    assert results(lall()) == ({},)
    assert run(0, v, lall()) == (v,)
def test_conde():
    """Check `conde` through `run`, including an infinite goal stream."""
    x = var()
    assert results(conde([eq(x, 2)], [eq(x, 3)])) == ({x: 2}, {x: 3})
    assert results(conde([eq(x, 2), eq(x, 3)])) == ()

    assert set(run(0, x, conde([eq(x, 2)], [eq(x, 3)]))) == {2, 3}
    assert set(run(0, x, conde([eq(x, 2), eq(x, 3)]))) == set()

    goals = ([eq(x, i)] for i in count())  # infinite number of goals
    assert run(1, x, conde(goals)) == (0,)
    # The second `run` keeps consuming the same generator, so it resumes at
    # the next goal rather than restarting.
    assert run(1, x, conde(goals)) == (1,)
def test_short_circuit():
    """A failing goal must stop evaluation before later goals run."""

    def exploding_goal(s):
        raise NotImplementedError()

    v = var("x")
    # `fail` yields no states, so `exploding_goal` is never invoked.
    tuple(run(5, v, fail, exploding_goal))
def test_run():
    """Check `run`'s result-count argument and lazy (`None`) mode."""
    x, y, z = var(), var(), var()
    res = run(None, x, eq(x, 1))
    # `run(None, ...)` returns a lazy iterator instead of a tuple.
    assert isinstance(res, Iterator)
    assert tuple(res) == (1,)

    assert run(1, x, eq(x, 1)) == (1,)
    assert run(2, x, eq(x, 1)) == (1,)
    # A count of `0` requests all results.
    assert run(0, x, eq(x, 1)) == (1,)
    assert run(1, x, eq(x, (y, z)), eq(y, 3), eq(z, 4)) == ((3, 4),)
    assert set(run(2, x, conde([eq(x, 1)], [eq(x, 2)]))) == set((1, 2))
def test_run_output_reify():
    """`run` reifies logic variables inside a structured output template."""
    v = var()
    assert run(0, (1, 2, v), eq(v, 3)) == ((1, 2, 3),)
def test_lanyseq():
    """`lany` accepts a sequence of goals and remains re-runnable."""
    v = var()
    g = lany((eq(v, i) for i in range(3)))
    # The same goal object can be evaluated more than once.
    for _ in range(2):
        assert list(g({})) == [{v: 0}, {v: 1}, {v: 2}]

    # Test lanyseq with an infinite number of goals.
    for _ in range(2):
        assert set(run(3, v, lany((eq(v, i) for i in count())))) == {0, 1, 2}
def test_lall_errors():
    """Exceptions raised inside a goal must propagate out of `run`."""

    class CustomError(Exception):
        pass

    def failing_relation():
        def _goal(s):
            raise CustomError("some exception")

        return lall(_goal)

    with raises(CustomError):
        run(0, var(), failing_relation())
def test_dict():
    """Unification descends into `dict` values."""
    v = var()
    assert run(0, v, eq({1: v}, {1: 2})) == (2,)
def test_ifa():
    """Check `ifa`: commit to the first branch once it succeeds."""
    x, y = var(), var()

    # The first branch succeeds, so the second is never tried.
    assert run(0, (x, y), ifa(lall(eq(x, True), eq(y, 1)), eq(y, 2))) == ((True, 1),)
    # `x` is already `False`, so the first branch fails and the second is used.
    assert run(
        0, y, eq(x, False), ifa(lall(eq(x, True), eq(y, 1)), lall(eq(y, 2)))
    ) == (2,)
    # `x` is `False`, so both branches fail.
    assert (
        run(
            0,
            y,
            eq(x, False),
            ifa(lall(eq(x, True), eq(y, 1)), lall(eq(x, True), eq(y, 2))),
        )
        == ()
    )
    # Only the first branch's binding (`y == 1`) is produced.
    assert run(
        0,
        y,
        eq(x, True),
        ifa(lall(eq(x, True), eq(y, 1)), lall(eq(x, True), eq(y, 2))),
    ) == (1,)
def test_ground_order():
    """`ground_order` sorts terms from most to least ground."""
    x, y, z = var(), var(), var()
    assert run(0, x, ground_order((y, [1, z], 1), x)) == ([1, [1, z], y],)
    a, b, c = var(), var(), var()
    assert run(0, (a, b, c), ground_order((y, [1, z], 1), (a, b, c))) == (
        (1, [1, z], y),
    )
    # The same ordering results regardless of the input order.
    res = run(0, z, ground_order([cons(x, y), (x, y)], z))
    assert res == ([(x, y), cons(x, y)],)
    res = run(0, z, ground_order([(x, y), cons(x, y)], z))
    assert res == ([(x, y), cons(x, y)],)
================================================
FILE: tests/test_facts.py
================================================
from unification import var
from kanren.core import conde, run
from kanren.facts import Relation, fact, facts
def test_relation():
    """Query a `Relation` of parent facts, including a derived grandparent goal."""
    parent = Relation()
    fact(parent, "Homer", "Bart")
    fact(parent, "Homer", "Lisa")
    fact(parent, "Marge", "Bart")
    fact(parent, "Marge", "Lisa")
    fact(parent, "Abe", "Homer")
    fact(parent, "Jackie", "Marge")

    x = var("x")
    assert set(run(5, x, parent("Homer", x))) == set(("Bart", "Lisa"))
    assert set(run(5, x, parent(x, "Bart"))) == set(("Homer", "Marge"))

    def grandparent(x, z):
        # Composite goal: x is a parent of some y, who is a parent of z.
        y = var()
        return conde((parent(x, y), parent(y, z)))

    assert set(run(5, x, grandparent(x, "Bart"))) == set(("Abe", "Jackie"))

    # A `Relation`'s name should appear in its string representation.
    foo = Relation("foo")
    assert "foo" in str(foo)
def test_fact():
    """`fact` asserts one tuple; `facts` asserts several at once."""
    relation = Relation()
    fact(relation, 1, 2)
    assert (1, 2) in relation.facts
    assert (10, 10) not in relation.facts

    facts(relation, (2, 3), (3, 4))
    for pair in [(2, 3), (3, 4)]:
        assert pair in relation.facts
def test_unify_variable_with_itself_should_not_unify():
    """Regression test for https://github.com/logpy/logpy/issues/33."""
    valido = Relation()
    for pair in [("a", "b"), ("b", "a")]:
        fact(valido, *pair)
    v = var()
    # No fact has equal fields, so (v, v) cannot match.
    assert run(0, v, valido(v, v)) == ()
def test_unify_variable_with_itself_should_unify():
    """A variable used twice matches only facts whose fields are equal."""
    valido = Relation()
    for pair in [(0, 1), (1, 0), (1, 1)]:
        fact(valido, *pair)
    v = var()
    assert run(0, v, valido(v, v)) == (1,)
def test_unify_tuple():
    """Facts stored as tuples unify with unpacked versions of those facts."""
    valido = Relation()
    for pair in [(0, 1), (1, 0), (1, 1)]:
        fact(valido, pair)

    a, b = var(), var()
    assert set(run(0, a, valido((a, b)))) == {0, 1}
    assert set(run(0, (a, b), valido((a, b)))) == {(0, 1), (1, 0), (1, 1)}
    # The repeated-variable pattern only matches the (1, 1) fact.
    assert run(0, a, valido((a, a))) == (1,)
================================================
FILE: tests/test_goals.py
================================================
import pytest
from cons import cons
from cons.core import ConsPair
from unification import isvar, unify, var
from kanren.core import conde, eq, run
from kanren.goals import (
appendo,
conso,
heado,
itero,
membero,
nullo,
permuteo,
rembero,
tailo,
)
def results(g, s=None):
    """Evaluate goal ``g`` against substitution ``s`` (empty by default),
    collecting all resulting states into a tuple."""
    initial = {} if s is None else s
    return tuple(g(initial))
def test_heado():
    """`heado` relates a sequence's first element to the sequence."""
    head, tail, seq = var(), var(), var()
    assert (head, 1) in results(heado(head, (1, 2, 3)))[0].items()
    assert (head, 1) in results(heado(1, (head, 2, 3)))[0].items()
    # An empty sequence has no head.
    assert results(heado(head, ())) == ()
    assert run(0, head, heado(head, seq), conso(1, tail, seq)) == (1,)
def test_tailo():
    """`tailo` relates a sequence's remainder to the sequence."""
    rest, out, seq = var(), var(), var()
    assert (rest, (2, 3)) in results(tailo(rest, (1, 2, 3)))[0].items()
    assert (rest, ()) in results(tailo(rest, (1,)))[0].items()
    # An empty sequence has no tail.
    assert results(tailo(rest, ())) == ()
    assert run(0, out, tailo(out, seq), conso(rest, (1, 2), seq)) == ((1, 2),)
def test_conso():
    """`conso` relates head, tail, and the whole sequence in every direction."""
    h, t, s = var(), var(), var()
    assert not results(conso(h, t, ()))
    assert results(conso(1, (2, 3), (1, 2, 3)))
    assert results(conso(h, (2, 3), (1, 2, 3))) == ({h: 1},)
    assert results(conso(1, (2, 3), h)) == ({h: (1, 2, 3)},)
    assert results(conso(h, t, (1, 2, 3))) == ({h: 1, t: (2, 3)},)
    assert results(conso(h, (2, 3), t)) == ({t: (h, 2, 3)},)
    assert run(0, h, conso(h, t, s), eq(s, (1, 2, 3))) == (1,)

    # Confirm that custom sequence types are preserved.
    class mytuple(tuple):
        def __add__(self, other):
            return type(self)(super(mytuple, self).__add__(other))

    assert type(results(conso(h, mytuple((2, 3)), t))[0][t]) == mytuple
def test_nullo_itero():
    """Check `nullo` (empty-collection relation) and `itero` (iterable relation)."""
    x, y, z = var(), var(), var()
    q_lv, a_lv = var(), var()

    assert run(0, q_lv, conso(1, q_lv, [1]), nullo(q_lv))
    assert run(0, q_lv, nullo(q_lv), conso(1, q_lv, [1]))

    # Mixed empty-collection types do not all unify.
    assert not run(0, q_lv, nullo(q_lv, [], ()))

    assert run(0, [a_lv, q_lv], nullo(q_lv, a_lv, default_ConsNull=tuple)) == (
        [(), ()],
    )
    assert run(0, [a_lv, q_lv], nullo(a_lv, [], q_lv)) == ([[], []],)

    assert ([],) == run(0, q_lv, nullo(q_lv, []))
    assert ([],) == run(0, q_lv, nullo([], q_lv))
    assert (None,) == run(0, q_lv, nullo(None, q_lv))
    assert (tuple(),) == run(0, q_lv, nullo(tuple(), q_lv))
    assert (q_lv,) == run(0, q_lv, nullo(tuple(), tuple()))

    # Fresh variables default to the list null type.
    assert ([],) == run(0, q_lv, nullo(var(), q_lv))
    assert ([],) == run(0, q_lv, nullo(q_lv, var()))
    assert ([],) == run(0, q_lv, nullo(q_lv, q_lv))

    assert isvar(run(0, y, nullo([]))[0])
    assert isvar(run(0, y, nullo(None))[0])
    assert run(0, y, nullo(y))[0] == []

    # The null type follows the containing collection's type.
    assert run(0, y, conso(var(), y, [1]), nullo(y))[0] == []
    assert run(0, y, conso(var(), y, (1,)), nullo(y))[0] == ()

    assert run(1, y, conso(1, x, y), itero(y))[0] == [1]
    assert run(1, y, conso(1, x, y), conso(2, z, x), itero(y))[0] == [1, 2]

    # Make sure that the remaining results end in logic variables
    res_2 = run(2, y, conso(1, x, y), conso(2, z, x), itero(y))[1]
    assert res_2[:2] == [1, 2]
    assert isvar(res_2[-1])
def test_membero():
    """Check `membero` with ground and unground collections."""
    x, y = var(), var()
    assert set(run(5, x, membero(x, (1, 2, 3)), membero(x, (2, 3, 4)))) == {2, 3}

    assert run(5, x, membero(2, (1, x, 3))) == (2,)
    assert run(0, x, membero(1, (1, 2, 3))) == (x,)
    assert run(0, x, membero(1, (2, 3))) == ()

    g = membero(x, (0, 1, 2))
    assert tuple(r[x] for r in g({})) == (0, 1, 2)

    def in_cons(x, y):
        # True when `x` appears in the (possibly improper) cons-list `y`.
        if issubclass(type(y), ConsPair):
            return x == y.car or in_cons(x, y.cdr)
        else:
            return False

    # With an unground collection, each result is a cons pair containing the
    # member element.
    res = run(4, x, membero(1, x))
    assert all(in_cons(1, r) for r in res)

    res = run(4, (x, y), membero(x, y))
    assert all(in_cons(i, r) for i, r in res)
def test_uneval_membero():
    """`membero` works when the collection is itself only later grounded."""
    elem, coll = var(), var()
    found = set(
        run(100, elem, membero(coll, ((1, 2, 3), (4, 5, 6))), membero(elem, coll))
    )
    assert found == {1, 2, 3, 4, 5, 6}
def test_appendo():
    """Check `appendo` in all directions with both tuples and lists."""
    q_lv = var()
    assert run(0, q_lv, appendo((), (1, 2), (1, 2))) == (q_lv,)
    assert run(0, q_lv, appendo((), (1, 2), 1)) == ()
    assert run(0, q_lv, appendo((), (1, 2), (1,))) == ()
    assert run(0, q_lv, appendo((1, 2), (3, 4), (1, 2, 3, 4))) == (q_lv,)
    assert run(5, q_lv, appendo((1, 2, 3), q_lv, (1, 2, 3, 4, 5))) == ((4, 5),)
    assert run(5, q_lv, appendo(q_lv, (4, 5), (1, 2, 3, 4, 5))) == ((1, 2, 3),)
    assert run(5, q_lv, appendo((1, 2, 3), (4, 5), q_lv)) == ((1, 2, 3, 4, 5),)

    q_lv, r_lv = var(), var()

    assert ([1, 2, 3, 4],) == run(0, q_lv, appendo([1, 2], [3, 4], q_lv))
    assert ([3, 4],) == run(0, q_lv, appendo([1, 2], q_lv, [1, 2, 3, 4]))
    assert ([1, 2],) == run(0, q_lv, appendo(q_lv, [3, 4], [1, 2, 3, 4]))

    # All prefix/suffix splits of (1, 2, 3, 4).
    expected_res = set(
        [
            ((), (1, 2, 3, 4)),
            ((1,), (2, 3, 4)),
            ((1, 2), (3, 4)),
            ((1, 2, 3), (4,)),
            ((1, 2, 3, 4), ()),
        ]
    )
    assert expected_res == set(run(0, (q_lv, r_lv), appendo(q_lv, r_lv, (1, 2, 3, 4))))

    res = run(3, (q_lv, r_lv), appendo(q_lv, [3, 4], r_lv))
    assert len(res) == 3
    # Generated prefixes contain fresh variables...
    assert any(len(a) > 0 and isvar(a[0]) for a, b in res)
    # ...but each result still satisfies prefix + [3, 4] == whole.
    assert all(a + [3, 4] == b for a, b in res)

    res = run(0, (q_lv, r_lv), appendo([3, 4], q_lv, r_lv))
    assert len(res) == 2
    assert ([], [3, 4]) == res[0]
    assert all(
        type(v) == cons for v in unify((var(), cons(3, 4, var())), res[1]).values()
    )
@pytest.mark.skip("Misspecified test")
def test_appendo_reorder():
    """Formerly exercised implicit goal reordering; kept for reference (see below)."""
    # XXX: This test generates goal conjunctions that are non-terminating given
    # the specified goal ordering.  More specifically, it generates
    # `lall(appendo(x, y, w), appendo(w, z, ()))`, for which the first
    # `appendo` produces an infinite stream of results and the second
    # necessarily fails for all values of the first `appendo` yielding
    # non-empty `w` unifications.
    #
    # The only reason it worked before is the `EarlyGoalError`
    # and it's implicit goal reordering, which made this case an out-of-place
    # test for a goal reordering feature that has nothing to do with `appendo`.
    # Furthermore, the `EarlyGoalError` mechanics do *not* fix this general
    # problem, and it's trivial to generate an equivalent situation in which
    # an `EarlyGoalError` is never thrown.
    #
    # In other words, it seems like a nice side effect of `EarlyGoalError`, but
    # it's actually a very costly approach that masks a bigger issue; one that
    # all miniKanren programmers need to think about when developing.

    x, y, z, w = var(), var(), var(), var()
    for t in [tuple(range(i)) for i in range(5)]:
        print(t)
        for xi, yi in run(0, (x, y), appendo(x, y, t)):
            assert xi + yi == t

        results = run(2, (x, y, z, w), appendo(x, y, w), appendo(w, z, t))
        for xi, yi, zi, wi in results:
            assert xi + yi + zi == t
def test_rembero():
    """Check `rembero` (remove-one-element relation) forwards and backwards."""
    q_lv = var()
    assert ([],) == run(0, q_lv, rembero(1, [1], q_lv))
    assert ([], [1]) == run(0, q_lv, rembero(1, q_lv, []))

    # Running backwards: lists from which removing a 5 yields [1, 2, 3, 4]
    # (including the cases where no 5 was removed).
    expected_res = (
        [5, 1, 2, 3, 4],
        [1, 5, 2, 3, 4],
        [1, 2, 5, 3, 4],
        [1, 2, 3, 5, 4],
        [1, 2, 3, 4],
        [1, 2, 3, 4, 5],
    )
    assert expected_res == run(0, q_lv, rembero(5, q_lv, [1, 2, 3, 4]))
def test_permuteo():
    """Check `permuteo` with (un)hashable elements, unground terms, and `no_ident`."""
    from itertools import permutations

    a_lv = var()
    q_lv = var()

    class Blah:
        def __hash__(self):
            raise TypeError()

    # An unhashable sequence with an unhashable object in it
    obj_1 = [Blah()]

    assert results(permuteo((1, 2), (2, 1))) == ({},)
    assert results(permuteo((1, obj_1), (obj_1, 1))) == ({},)
    assert results(permuteo([1, 2], [2, 1])) == ({},)
    assert results(permuteo((1, 2, 2), (2, 1, 2))) == ({},)

    # (1, obj_1, a_lv) == (1, obj_1, a_lv) ==> {a_lv: a_lv}
    # (1, obj_1, a_lv) == (1, a_lv, obj_1) ==> {a_lv: obj_1}
    # (1, obj_1, a_lv) == (a_lv, obj_1, 1) ==> {a_lv: 1}
    assert run(0, a_lv, permuteo((1, obj_1, a_lv), (obj_1, a_lv, 1))) == (
        1,
        a_lv,
        obj_1,
    )

    # Length or multiset mismatches fail.
    assert not results(permuteo((1, 2), (2, 1, 2)))
    assert not results(permuteo((1, 2), (2, 1, 2)))
    assert not results(permuteo((1, 2, 3), (2, 1, 2)))
    assert not results(permuteo((1, 2, 1), (2, 1, 2)))
    assert not results(permuteo([1, 2, 1], (2, 1, 2)))

    x = var()
    assert set(run(0, x, permuteo(x, (1, 2, 2)))) == set(
        ((1, 2, 2), (2, 1, 2), (2, 2, 1))
    )

    q_lv = var()
    assert run(0, q_lv, permuteo((1, 2, 3), (q_lv, 2, 1))) == (3,)
    assert run(0, q_lv, permuteo([1, 2, 3], [3, 2, 1]))
    assert run(0, q_lv, permuteo((1, 2, 3), (3, 2, 1)))
    assert run(0, q_lv, permuteo([1, 2, 3], [2, 1])) == ()
    # Mismatched collection types do not unify.
    assert run(0, q_lv, permuteo([1, 2, 3], (3, 2, 1))) == ()

    col = [1, 2, 3]
    exp_res = set(tuple(i) for i in permutations(col))

    # The first term is ground
    res = run(0, q_lv, permuteo(col, q_lv))
    assert all(type(r) == type(col) for r in res)
    res = set(tuple(r) for r in res)
    assert res == exp_res

    # The second term is ground
    res = run(0, q_lv, permuteo(q_lv, col))
    assert all(type(r) == type(col) for r in res)
    res = set(tuple(r) for r in res)
    assert res == exp_res

    a_lv = var()
    # Neither term is ground: results enumerate paired fresh-variable lists
    # of increasing length.
    bi_res = run(5, [q_lv, a_lv], permuteo(q_lv, a_lv))

    assert bi_res[0] == [[], []]
    bi_var_1 = bi_res[1][0][0]
    assert isvar(bi_var_1)
    assert bi_res[1][0] == bi_res[1][1] == [bi_var_1]
    bi_var_2 = bi_res[2][0][1]
    assert isvar(bi_var_2) and bi_var_1 is not bi_var_2
    assert bi_res[2][0] == bi_res[2][1] == [bi_var_1, bi_var_2]
    assert bi_res[3][0] != bi_res[3][1] == [bi_var_2, bi_var_1]
    bi_var_3 = bi_res[4][0][2]
    assert bi_res[4][0] == bi_res[4][1] == [bi_var_1, bi_var_2, bi_var_3]

    # `no_ident=True` excludes the identity permutation.
    assert run(0, x, permuteo((1, 2), (1, 2), no_ident=True)) == ()
    assert run(0, True, permuteo((1, 2), (2, 1), no_ident=True)) == (True,)
    assert run(0, x, permuteo((), x, no_ident=True)) == ()
    assert run(0, x, permuteo(x, (), no_ident=True)) == ()
    assert run(0, x, permuteo((1,), x, no_ident=True)) == ()
    assert run(0, x, permuteo(x, (1,), no_ident=True)) == ()
    assert (1, 2, 3) not in run(0, x, permuteo((1, 2, 3), x, no_ident=True))
    assert (1, 2, 3) not in run(0, x, permuteo(x, (1, 2, 3), no_ident=True))
    y = var()
    assert all(a != b for a, b in run(6, [x, y], permuteo(x, y, no_ident=True)))

    def eq_permute(x, y):
        # Custom inner relation: plain equality or element-wise permutations.
        return conde([eq(x, y)], [permuteo(a, b) for a, b in zip(x, y)])

    assert run(
        0, True, permuteo((1, (2, 3)), ((3, 2), 1), inner_eq=eq_permute, no_ident=True)
    ) == (True,)
================================================
FILE: tests/test_graph.py
================================================
from functools import partial
from math import exp, log
from numbers import Real
from operator import add, mul
import pytest
import toolz
from cons import cons
from etuples.core import ExpressionTuple, etuple
from unification import isvar, reify, unify, var
from kanren import conde, eq, lall, run
from kanren.constraints import isinstanceo
from kanren.graph import eq_length, map_anyo, mapo, reduceo, walko
class OrderedFunction(object):
    """Wrap a callable so that it can be ordered (by name) against other
    functions and strings — needed when `sorted` is applied to expression
    tuples containing these operators."""

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

    @property
    def __name__(self):
        return self.func.__name__

    def __lt__(self, other):
        # Fall back to `str(other)` when `other` has no `__name__`.
        other_key = getattr(other, "__name__", str(other))
        return self.__name__ < other_key

    def __gt__(self, other):
        other_key = getattr(other, "__name__", str(other))
        return self.__name__ > other_key

    def __repr__(self):
        return self.__name__
# Wrap the operators used in these tests so they can be ordered by name
# (required when `sorted` is applied to expression-tuple results below).
add = OrderedFunction(add)
mul = OrderedFunction(mul)
log = OrderedFunction(log)
exp = OrderedFunction(exp)

# Make `ExpressionTuple`s orderable so test results can be `sorted`;
# integers are promoted to one-element tuples for comparison.
ExpressionTuple.__lt__ = (
    lambda self, other: self < (other,)
    if isinstance(other, int)
    else tuple(self) < tuple(other)
)
ExpressionTuple.__gt__ = (
    lambda self, other: self > (other,)
    if isinstance(other, int)
    else tuple(self) > tuple(other)
)
def single_math_reduceo(expanded_term, reduced_term):
    """Construct a goal for some simple math reductions.

    Relates ``x + x`` to ``2 * x`` and ``log(exp(x))`` to ``x``, where the
    fresh variable ``x_lv`` carries `isinstanceo` constraints for `Real` and
    `ExpressionTuple`.
    """
    x_lv = var()
    return lall(
        isinstanceo(x_lv, Real),
        isinstanceo(x_lv, ExpressionTuple),
        conde(
            # x + x -> 2 * x
            [
                eq(expanded_term, etuple(add, x_lv, x_lv)),
                eq(reduced_term, etuple(mul, 2, x_lv)),
            ],
            # log(exp(x)) -> x
            [eq(expanded_term, etuple(log, etuple(exp, x_lv))), eq(reduced_term, x_lv)],
        ),
    )
# Fixed-point reduction built from the single-step math reducer.
math_reduceo = partial(reduceo, single_math_reduceo)

# Walk `etuple` graphs applying a relation to sub-terms; operators are
# related with plain `eq` and mapping never succeeds on the null result.
term_walko = partial(
    walko,
    rator_goal=eq,
    null_type=ExpressionTuple,
    map_rel=partial(map_anyo, null_res=False),
)
def test_basics():
    """Sanity check: `etuple` terms unify structurally."""
    v = var()
    s = unify(
        etuple(log, etuple(exp, etuple(log, 1))), etuple(log, etuple(exp, v))
    )
    assert s[v] == etuple(log, 1)
def test_reduceo():
    """Check fixed-point reduction (`reduceo`) in both directions."""
    q_lv = var()

    # Reduce/forward
    res = run(0, q_lv, math_reduceo(etuple(log, etuple(exp, etuple(log, 1))), q_lv))
    assert len(res) == 1
    assert res[0] == etuple(log, 1)

    res = run(
        0,
        q_lv,
        math_reduceo(etuple(log, etuple(exp, etuple(log, etuple(exp, 1)))), q_lv),
    )
    # Both the full fixed point and the intermediate step are produced.
    assert res[0] == 1
    assert res[1] == etuple(log, etuple(exp, 1))

    # Expand/backward
    res = run(3, q_lv, math_reduceo(q_lv, 1))
    assert res[0] == etuple(log, etuple(exp, 1))
    assert res[1] == etuple(log, etuple(exp, etuple(log, etuple(exp, 1))))
def test_mapo():
    """Check `mapo` (element-wise relation mapping) in both directions."""
    q_lv = var()

    def blah(x, y):
        # Relates 1 -> "a" and 3 -> "b"; any other pairing fails.
        return conde([eq(x, 1), eq(y, "a")], [eq(x, 3), eq(y, "b")])

    assert run(0, q_lv, mapo(blah, [], q_lv)) == ([],)
    # 2 has no mapping, so the whole map fails.
    assert run(0, q_lv, mapo(blah, [1, 2, 3], q_lv)) == ()
    assert run(0, q_lv, mapo(blah, [1, 1, 3], q_lv)) == (["a", "a", "b"],)
    # And in reverse.
    assert run(0, q_lv, mapo(blah, q_lv, ["a", "a", "b"])) == ([1, 1, 3],)

    # With both arguments unground, paired lists are enumerated.
    exp_res = (
        [[], []],
        [[1], ["a"]],
        [[3], ["b"]],
        [[1, 1], ["a", "a"]],
        [[3, 1], ["b", "a"]],
    )
    a_lv = var()
    res = run(5, [q_lv, a_lv], mapo(blah, q_lv, a_lv))
    assert res == exp_res
def test_eq_length():
    """Check `eq_length`, which relates two sequences of equal length."""
    q_lv = var()

    res = run(0, q_lv, eq_length([1, 2, 3], q_lv))
    assert len(res) == 1 and len(res[0]) == 3 and all(isvar(q) for q in res[0])

    res = run(0, q_lv, eq_length(q_lv, [1, 2, 3]))
    assert len(res) == 1 and len(res[0]) == 3 and all(isvar(q) for q in res[0])

    # A cons-prefixed term accounts for one element already.
    res = run(0, q_lv, eq_length(cons(1, q_lv), [1, 2, 3]))
    assert len(res) == 1 and len(res[0]) == 2 and all(isvar(q) for q in res[0])

    # Both unground: enumerate equal-length pairs of fresh-variable tuples.
    v_lv = var()
    res = run(3, (q_lv, v_lv), eq_length(q_lv, v_lv, default_ConsNull=tuple))
    assert len(res) == 3 and all(
        isinstance(a, tuple)
        and len(a) == len(b)
        and (len(a) == 0 or a != b)
        and all(isvar(r) for r in a)
        for a, b in res
    )
def test_map_anyo_types():
    """Make sure that `map_anyo` preserves the types between its arguments."""
    q_lv = var()
    # The output collection takes the input collection's type...
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1], q_lv))
    assert res[0] == [1]
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), (1,), q_lv))
    assert res[0] == (1,)
    # ...and vice versa.
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), q_lv, (1,)))
    assert res[0] == (1,)
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), q_lv, [1]))
    assert res[0] == [1]
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], [1, 2]))
    assert len(res) == 1
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], [1, 3]))
    assert len(res) == 0
    # Mismatched collection types fail.
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], (1, 2)))
    assert len(res) == 0
def test_map_anyo_misc():
    """Miscellaneous `map_anyo` checks with ground and unground arguments."""
    q_lv = var("q")

    res = run(0, q_lv, map_anyo(eq, [1, 2, 3], [1, 2, 3]))
    # TODO: Remove duplicate results
    assert len(res) == 7

    res = run(0, q_lv, map_anyo(eq, [1, 2, 3], [1, 3, 3]))
    assert len(res) == 0

    def one_to_threeo(x, y):
        # Relates 1 -> 3; any other pairing fails.
        return conde([eq(x, 1), eq(y, 3)])

    res = run(0, q_lv, map_anyo(one_to_threeo, [1, 2, 4, 1, 4, 1, 1], q_lv))
    assert res[0] == [3, 2, 4, 3, 4, 3, 3]

    # Already-reduced terms have no further reductions.
    assert (
        len(run(4, q_lv, map_anyo(math_reduceo, [etuple(mul, 2, var("x"))], q_lv))) == 0
    )

    test_res = run(4, q_lv, map_anyo(math_reduceo, [etuple(add, 2, 2), 1], q_lv))
    assert test_res == ([etuple(mul, 2, 2), 1],)

    test_res = run(4, q_lv, map_anyo(math_reduceo, [1, etuple(add, 2, 2)], q_lv))
    assert test_res == ([1, etuple(mul, 2, 2)],)

    # The default collection type is `list`...
    test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, var("z")))
    assert all(isinstance(r, list) for r in test_res)

    # ...but it can be overridden.
    test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, var("z"), tuple))
    assert all(isinstance(r, tuple) for r in test_res)

    x, y, z = var(), var(), var()

    def test_bin(a, b):
        # Relates 1 -> 2; any other pairing fails.
        return conde([eq(a, 1), eq(b, 2)])

    res = run(10, (x, y), map_anyo(test_bin, x, y, null_type=tuple))
    exp_res_form = (
        ((1,), (2,)),
        ((x, 1), (x, 2)),
        ((1, 1), (2, 2)),
        ((x, y, 1), (x, y, 2)),
        ((1, x), (2, x)),
        ((x, 1, 1), (x, 2, 2)),
        ((1, 1, 1), (2, 2, 2)),
        ((x, y, z, 1), (x, y, z, 2)),
        ((1, x, 1), (2, x, 2)),
        ((x, 1, y), (x, 2, y)),
    )

    # Results should unify with the expected forms, leaving x, y, z fresh.
    for a, b in zip(res, exp_res_form):
        s = unify(a, b)
        assert s is not False
        assert all(isvar(i) for i in reify((x, y, z), s))
@pytest.mark.parametrize(
    "test_input, test_output",
    # Pairs of ground input terms and their expected reductions.
    [
        ([], ()),
        ([1], ()),
        (
            [
                etuple(add, 1, 1),
            ],
            ([etuple(mul, 2, 1)],),
        ),
        ([1, etuple(add, 1, 1)], ([1, etuple(mul, 2, 1)],)),
        ([etuple(add, 1, 1), 1], ([etuple(mul, 2, 1), 1],)),
        (
            [etuple(mul, 2, 1), etuple(add, 1, 1), 1],
            ([etuple(mul, 2, 1), etuple(mul, 2, 1), 1],),
        ),
        (
            [
                etuple(add, 1, 1),
                etuple(log, etuple(exp, 5)),
            ],
            (
                [etuple(mul, 2, 1), 5],
                [etuple(add, 1, 1), 5],
                [etuple(mul, 2, 1), etuple(log, etuple(exp, 5))],
            ),
        ),
    ],
)
def test_map_anyo(test_input, test_output):
    """Test `map_anyo` with fully ground terms (i.e. no logic variables)."""
    q_lv = var()
    test_res = run(
        0,
        q_lv,
        map_anyo(math_reduceo, test_input, q_lv),
    )

    assert len(test_res) == len(test_output)

    test_res = sorted(test_res)
    test_output = sorted(test_output)

    # Make sure the first result matches.
    # TODO: This is fairly implementation-specific (i.e. dependent on the order
    # in which `condeseq` returns results).
    if len(test_output) > 0:
        assert test_res[0] == test_output[0]

    # Make sure all the results match.
    # TODO: If we want to avoid fixing the output order, convert the lists to
    # tuples and add everything to a set, then compare.
    assert test_res == test_output
def test_map_anyo_reverse():
    """Test `map_anyo` in "reverse" (i.e. specify the reduced form and generate the un-reduced form)."""  # noqa: E501
    # Unbounded reverse: enumerate terms that reduce to `rev_input`.
    q_lv = var()
    rev_input = [etuple(mul, 2, 1)]
    test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, rev_input))
    assert test_res == (
        [etuple(add, 1, 1)],
        [etuple(log, etuple(exp, etuple(add, 1, 1)))],
        # [etuple(log, etuple(exp, etuple(mul, 2, 1)))],
        [etuple(log, etuple(exp, etuple(log, etuple(exp, etuple(add, 1, 1)))))],
        # [etuple(log, etuple(exp, etuple(log, etuple(exp, etuple(mul, 2, 1)))))],
        [
            etuple(
                log,
                etuple(
                    exp,
                    etuple(
                        log, etuple(exp, etuple(log, etuple(exp, etuple(add, 1, 1))))
                    ),
                ),
            )
        ],
    )

    # Guided reverse: the un-reduced form is partially specified.
    test_res = run(
        4,
        q_lv,
        map_anyo(math_reduceo, [etuple(add, q_lv, 1)], [etuple(mul, 2, 1)]),
    )

    assert test_res == (1,)
def test_walko_misc():
    """Check `walko` over nested graphs and with a custom `rator_goal`."""
    q_lv = var(prefix="q")

    expr = etuple(add, etuple(mul, 2, 1), etuple(add, 1, 1))
    res = run(0, q_lv, walko(eq, expr, expr))
    # TODO: Remove duplicates
    assert len(res) == 162

    expr2 = etuple(add, etuple(mul, 2, 1), etuple(add, 2, 1))
    res = run(0, q_lv, walko(eq, expr, expr2))
    assert len(res) == 0

    def one_to_threeo(x, y):
        # Relates 1 -> 3; any other pairing fails.
        return conde([eq(x, 1), eq(y, 3)])

    # All 1s are replaced, at any nesting depth.
    res = run(
        1,
        q_lv,
        walko(
            one_to_threeo,
            [1, [1, 2, 4], 2, [[4, 1, 1]], 1],
            q_lv,
        ),
    )
    assert res == ([3, [3, 2, 4], 2, [[4, 3, 3]], 3],)

    assert run(2, q_lv, walko(eq, q_lv, q_lv, null_type=ExpressionTuple)) == (
        q_lv,
        etuple(),
    )

    res = run(
        1,
        q_lv,
        walko(
            one_to_threeo,
            etuple(
                add,
                1,
                etuple(mul, etuple(add, 1, 2), 1),
                etuple(add, etuple(add, 1, 2), 2),
            ),
            q_lv,
            # Only descend into `add` terms
            rator_goal=lambda x, y: lall(eq(x, add), eq(y, add)),
        ),
    )
    # The 1 inside the `mul` sub-term is left untouched.
    assert res == (
        etuple(
            add, 3, etuple(mul, etuple(add, 1, 2), 1), etuple(add, etuple(add, 3, 2), 2)
        ),
    )
@pytest.mark.parametrize(
    "test_input, test_output",
    # Pairs of ground input terms and their expected walked reductions.
    [
        (1, ()),
        (etuple(add, 1, 1), (etuple(mul, 2, 1),)),
        (
            # (2 * 1) + (1 + 1)
            etuple(add, etuple(mul, 2, 1), etuple(add, 1, 1)),
            (
                # 2 * (2 * 1)
                etuple(mul, 2, etuple(mul, 2, 1)),
                # (2 * 1) + (2 * 1)
                etuple(add, etuple(mul, 2, 1), etuple(mul, 2, 1)),
            ),
        ),
        (
            # (log(exp(2)) * 1) + (1 + 1)
            etuple(add, etuple(mul, etuple(log, etuple(exp, 2)), 1), etuple(add, 1, 1)),
            (
                # 2 * (2 * 1)
                etuple(mul, 2, etuple(mul, 2, 1)),
                # (2 * 1) + (2 * 1)
                etuple(add, etuple(mul, 2, 1), etuple(mul, 2, 1)),
                # (log(exp(2)) * 1) + (2 * 1)
                etuple(
                    add, etuple(mul, etuple(log, etuple(exp, 2)), 1), etuple(mul, 2, 1)
                ),
                etuple(add, etuple(mul, 2, 1), etuple(add, 1, 1)),
            ),
        ),
    ],
)
def test_walko(test_input, test_output):
    """Test `walko` with fully ground terms (i.e. no logic variables)."""
    q_lv = var()
    # Reduce to a fixed point while filtering out duplicate results.
    term_walko_fp = partial(reduceo, partial(term_walko, single_math_reduceo))
    test_res = run(
        len(test_output),
        q_lv,
        term_walko_fp(test_input, q_lv),
        results_filter=toolz.unique,
    )
    assert len(test_res) == len(test_output)

    test_res = sorted(test_res)
    test_output = sorted(test_output)

    # Make sure the first result matches.
    if len(test_output) > 0:
        assert test_res[0] == test_output[0]

    # Make sure all the results match.
    assert set(test_res) == set(test_output)
def test_walko_reverse():
    """Test `walko` in "reverse" (i.e. specify the reduced form and generate the un-reduced form).""" # noqa: E501
    q_lv = var("q")

    test_res = run(2, q_lv, term_walko(math_reduceo, q_lv, 5))
    assert test_res == (
        etuple(log, etuple(exp, 5)),
        etuple(log, etuple(exp, etuple(log, etuple(exp, 5)))),
    )
    # Every generated expansion must still evaluate back to the reduced value.
    assert all(e.eval_obj == 5.0 for e in test_res)

    # Make sure we get some variety in the results
    test_res = run(2, q_lv, term_walko(math_reduceo, q_lv, etuple(mul, 2, 5)))
    assert test_res == (
        # Expansion of the term's root
        etuple(add, 5, 5),
        # Expansion in the term's arguments
        etuple(mul, etuple(log, etuple(exp, 2)), etuple(log, etuple(exp, 5))),
        # Two step expansion at the root
        # etuple(log, etuple(exp, etuple(add, 5, 5))),
        # Expansion into a sub-term
        # etuple(mul, 2, etuple(log, etuple(exp, 5)))
    )
    assert all(e.eval_obj == 10.0 for e in test_res)

    r_lv = var("r")
    test_res = run(4, [q_lv, r_lv], term_walko(math_reduceo, q_lv, r_lv))
    expect_res = (
        [etuple(add, 1, 1), etuple(mul, 2, 1)],
        [etuple(log, etuple(exp, etuple(add, 1, 1))), etuple(mul, 2, 1)],
        [etuple(), etuple()],
        [
            etuple(add, etuple(mul, 2, 1), etuple(add, 1, 1)),
            etuple(mul, 2, etuple(mul, 2, 1)),
        ],
    )
    # Each generated pair must unify with its expected counterpart.  The
    # previous `assert list(...)` only checked that the zipped list was
    # non-empty, so failed unifications went unnoticed.  `unify` returns
    # `False` on failure; a successful unification may yield the *falsy*
    # empty substitution `{}`, hence the `is not False` checks.
    assert len(test_res) == len(expect_res)
    assert all(
        unify(a1, a2) is not False and unify(b1, b2) is not False
        for [a1, b1], [a2, b2] in zip(test_res, expect_res)
    )
================================================
FILE: tests/test_sudoku.py
================================================
"""
Based off
https://github.com/holtchesley/embedded-logic/blob/master/kanren/sudoku.ipynb
"""
import pytest
from unification import var
from kanren import run
from kanren.core import lall
from kanren.goals import permuteq
# The nine sudoku digits (1-9); any other hint value marks an empty cell.
DIGITS = tuple(range(1, 10))
def get_rows(board):
    """Split a flat, row-major 81-cell board into nine 9-cell row tuples."""
    row_starts = range(0, len(board), 9)
    return tuple(board[start : start + 9] for start in row_starts)
def get_columns(rows):
    """Transpose the nine row tuples into nine column tuples."""
    return tuple(tuple(row[col] for row in rows) for col in range(9))
def get_square(rows, x, y):
    """Collect, row-major, the 3x3 block whose top-left corner is (x, y)."""
    block = []
    for row_idx in range(x, x + 3):
        for col_idx in range(y, y + 3):
            block.append(rows[row_idx][col_idx])
    return tuple(block)
def get_squares(rows):
    """Return the nine 3x3 blocks of the board as tuples, in row-major order."""
    corners = [(x, y) for x in range(0, 9, 3) for y in range(0, 9, 3)]
    return tuple(get_square(rows, x, y) for x, y in corners)
def vars(hints):
    """Map each hint to itself if it is a digit 1-9, else to a fresh logic variable.

    Note: this intentionally shadows the `vars` builtin within this module.
    """
    return tuple(hint if hint in DIGITS else var() for hint in hints)
def all_numbers(coll):
    """Goal: `coll` is a permutation of the digits 1-9."""
    return permuteq(coll, DIGITS)
def sudoku_solver(hints):
    """Solve a sudoku board given as a flat, row-major 81-tuple of hints.

    Digits 1-9 are fixed hints; any other value (e.g. 0) marks an unknown
    cell.  Returns a tuple containing at most one solved board (empty when
    the board is unsolvable).
    """
    variables = vars(hints)
    rows = get_rows(variables)
    cols = get_columns(rows)
    sqs = get_squares(rows)
    # Constrain every row, column, and 3x3 square to be a permutation of 1-9.
    return run(
        1,
        variables,
        lall(*(all_numbers(r) for r in rows)),
        lall(*(all_numbers(c) for c in cols)),
        lall(*(all_numbers(s) for s in sqs)),
    )
# fmt: off
def test_missing_one_entry():
    """A board with a single unknown cell (the 0) is solved correctly."""
    example_board = (
        5, 3, 4, 6, 7, 8, 9, 1, 2,
        6, 7, 2, 1, 9, 5, 3, 4, 8,
        1, 9, 8, 3, 4, 2, 5, 6, 7,
        8, 5, 9, 7, 6, 1, 4, 2, 3,
        4, 2, 6, 8, 5, 3, 7, 9, 1,
        7, 1, 3, 9, 2, 4, 8, 5, 6,
        9, 6, 1, 5, 3, 7, 2, 8, 4,
        2, 8, 7, 4, 1, 9, 6, 3, 5,
        3, 4, 5, 2, 8, 6, 0, 7, 9,
    )
    expected_solution = (
        5, 3, 4, 6, 7, 8, 9, 1, 2,
        6, 7, 2, 1, 9, 5, 3, 4, 8,
        1, 9, 8, 3, 4, 2, 5, 6, 7,
        8, 5, 9, 7, 6, 1, 4, 2, 3,
        4, 2, 6, 8, 5, 3, 7, 9, 1,
        7, 1, 3, 9, 2, 4, 8, 5, 6,
        9, 6, 1, 5, 3, 7, 2, 8, 4,
        2, 8, 7, 4, 1, 9, 6, 3, 5,
        3, 4, 5, 2, 8, 6, 1, 7, 9,
    )
    # `sudoku_solver` returns a tuple of solutions; take the first (only) one.
    assert sudoku_solver(example_board)[0] == expected_solution
# fmt: off
def test_missing_complex_board():
    """A board with several unknown cells (one 0 per row) is solved correctly."""
    example_board = (
        5, 3, 4, 6, 7, 8, 9, 0, 2,
        6, 7, 2, 0, 9, 5, 3, 4, 8,
        0, 9, 8, 3, 4, 2, 5, 6, 7,
        8, 5, 9, 7, 6, 0, 4, 2, 3,
        4, 2, 6, 8, 5, 3, 7, 9, 0,
        7, 0, 3, 9, 2, 4, 8, 5, 6,
        9, 6, 0, 5, 3, 7, 2, 8, 4,
        2, 8, 7, 4, 0, 9, 6, 3, 5,
        3, 4, 5, 2, 8, 6, 0, 7, 9,
    )
    expected_solution = (
        5, 3, 4, 6, 7, 8, 9, 1, 2,
        6, 7, 2, 1, 9, 5, 3, 4, 8,
        1, 9, 8, 3, 4, 2, 5, 6, 7,
        8, 5, 9, 7, 6, 1, 4, 2, 3,
        4, 2, 6, 8, 5, 3, 7, 9, 1,
        7, 1, 3, 9, 2, 4, 8, 5, 6,
        9, 6, 1, 5, 3, 7, 2, 8, 4,
        2, 8, 7, 4, 1, 9, 6, 3, 5,
        3, 4, 5, 2, 8, 6, 1, 7, 9,
    )
    assert sudoku_solver(example_board)[0] == expected_solution
# fmt: off
def test_unsolvable():
    """A contradictory board (duplicate 9 in column 7) yields no solutions."""
    example_board = (
        5, 3, 4, 6, 7, 8, 9, 1, 2,
        6, 7, 2, 1, 9, 5, 9, 4, 8,  # Note column 7 has two 9's.
        1, 9, 8, 3, 4, 2, 5, 6, 7,
        8, 5, 9, 7, 6, 1, 4, 2, 3,
        4, 2, 6, 8, 5, 3, 7, 9, 1,
        7, 1, 3, 9, 2, 4, 8, 5, 6,
        9, 6, 1, 5, 3, 7, 2, 8, 4,
        2, 8, 7, 4, 1, 9, 6, 3, 5,
        3, 4, 5, 2, 8, 6, 0, 7, 9,
    )
    # No solution exists, so the solver returns an empty tuple.
    assert sudoku_solver(example_board) == ()
# fmt: off
@pytest.mark.skip(reason="Currently too slow!")
def test_many_missing_elements():
    """A realistic puzzle with many unknown cells (skipped: too slow)."""
    example_board = (
        5, 3, 0, 0, 7, 0, 0, 0, 0,
        6, 0, 0, 1, 9, 5, 0, 0, 0,
        0, 9, 8, 0, 0, 0, 0, 6, 0,
        8, 0, 0, 0, 6, 0, 0, 0, 3,
        4, 0, 0, 8, 0, 3, 0, 0, 1,
        7, 0, 0, 0, 2, 0, 0, 0, 6,
        0, 6, 0, 0, 0, 0, 2, 8, 0,
        0, 0, 0, 4, 1, 9, 0, 0, 5,
        0, 0, 0, 0, 8, 0, 0, 7, 9
    )
    assert sudoku_solver(example_board)[0] == (
        5, 3, 4, 6, 7, 8, 9, 1, 2,
        6, 7, 2, 1, 9, 5, 3, 4, 8,
        1, 9, 8, 3, 4, 2, 5, 6, 7,
        8, 5, 9, 7, 6, 1, 4, 2, 3,
        4, 2, 6, 8, 5, 3, 7, 9, 1,
        7, 1, 3, 9, 2, 4, 8, 5, 6,
        9, 6, 1, 5, 3, 7, 2, 8, 4,
        2, 8, 7, 4, 1, 9, 6, 3, 5,
        3, 4, 5, 2, 8, 6, 1, 7, 9
    )
# fmt: off
@pytest.mark.skip(reason="Currently too slow!")
def test_websudoku_easy():
    """Solve an easy-rated puzzle from websudoku.com (skipped: too slow)."""
    # A sudoku from websudoku.com.
    example_board = (
        0, 0, 8, 0, 0, 6, 0, 0, 0,
        0, 0, 4, 3, 7, 9, 8, 0, 0,
        5, 7, 0, 0, 1, 0, 3, 2, 0,
        0, 5, 2, 0, 0, 7, 0, 0, 0,
        0, 6, 0, 5, 9, 8, 0, 4, 0,
        0, 0, 0, 4, 0, 0, 5, 7, 0,
        0, 2, 1, 0, 4, 0, 0, 9, 8,
        0, 0, 9, 6, 2, 3, 1, 0, 0,
        0, 0, 0, 9, 0, 0, 7, 0, 0,
    )
    # `sudoku_solver` returns a *tuple of* solutions, so compare the first
    # one.  (The previous bare comparison matched a 1-tuple of boards
    # against a flat board and could never pass — as every sibling test's
    # `[0]` indexing shows.)
    assert sudoku_solver(example_board)[0] == (
        9, 3, 8, 2, 5, 6, 4, 1, 7,
        2, 1, 4, 3, 7, 9, 8, 6, 5,
        5, 7, 6, 8, 1, 4, 3, 2, 9,
        4, 5, 2, 1, 3, 7, 9, 8, 6,
        1, 6, 7, 5, 9, 8, 2, 4, 3,
        8, 9, 3, 4, 6, 2, 5, 7, 1,
        3, 2, 1, 7, 4, 5, 6, 9, 8,
        7, 8, 9, 6, 2, 3, 1, 5, 4,
        6, 4, 5, 9, 8, 1, 7, 3, 2
    )
================================================
FILE: tests/test_term.py
================================================
from cons import cons
from etuples import etuple
from unification import reify, unify, var
from kanren.core import run
from kanren.term import applyo, arguments, operator, term, unifiable_with_term
@unifiable_with_term
class Node(object):
    """A minimal operator/arguments term used to exercise `kanren.term`."""

    def __init__(self, op, args):
        self.op = op
        self.args = args

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.op == other.op and self.args == other.args

    def __hash__(self):
        return hash((type(self), self.op, self.args))

    def __str__(self):
        args_repr = ", ".join(map(str, self.args))
        return "%s(%s)" % (self.op.name, args_repr)

    __repr__ = __str__
class Operator(object):
    """A named operator symbol for constructing `Node` terms."""

    def __init__(self, name):
        # Display/identity name, e.g. "add" or "mul".
        self.name = name
# Shared operator instances used by the constructors below.
Add = Operator("add")
Mul = Operator("mul")


def add(*args):
    """Build an addition `Node` over `args`."""
    return Node(Add, args)


def mul(*args):
    """Build a multiplication `Node` over `args`."""
    return Node(Mul, args)
class Op(object):
    """A bare named-operator class (kept distinct from `Operator` above)."""

    def __init__(self, name):
        # The operator's name.
        self.name = name
@arguments.register(Node)
def arguments_Node(t):
    """Dispatch `arguments` for `Node`: its argument sequence."""
    return t.args


@operator.register(Node)
def operator_Node(t):
    """Dispatch `operator` for `Node`: its operator object."""
    return t.op


@term.register(Operator, (list, tuple))
def term_Op(op, args):
    """Dispatch `term` to rebuild a `Node` from an operator and arguments."""
    return Node(op, args)
def test_applyo():
    """`applyo` relates an operator and arguments to the applied term."""
    x = var()

    # Any of the three positions can be the unknown.
    assert run(0, x, applyo("add", (1, 2, 3), x)) == (("add", 1, 2, 3),)
    assert run(0, x, applyo(x, (1, 2, 3), ("add", 1, 2, 3))) == ("add",)
    assert run(0, x, applyo("add", x, ("add", 1, 2, 3))) == ((1, 2, 3),)

    a_lv, b_lv, c_lv = var(), var(), var()

    from operator import add

    # A callable operator with ground arguments is evaluated.
    assert run(0, c_lv, applyo(add, (1, 2), c_lv)) == (3,)
    assert run(0, c_lv, applyo(add, etuple(1, 2), c_lv)) == (3,)
    # With unbound arguments, the result stays an unevaluated cons pair.
    assert run(0, c_lv, applyo(add, a_lv, c_lv)) == (cons(add, a_lv),)

    # Round-trip `operator`/`arguments`/`term` through `applyo` for several
    # container types.
    for obj in (
        (1, 2, 3),
        (add, 1, 2),
        [1, 2, 3],
        [add, 1, 2],
        etuple(1, 2, 3),
        etuple(add, 1, 2),
    ):
        o_rator, o_rands = operator(obj), arguments(obj)
        assert run(0, a_lv, applyo(o_rator, o_rands, a_lv)) == (term(o_rator, o_rands),)
        # Just acts like `conso` here
        assert run(0, a_lv, applyo(o_rator, a_lv, obj)) == (arguments(obj),)
        assert run(0, a_lv, applyo(a_lv, o_rands, obj)) == (operator(obj),)

    # Just acts like `conso` here, too
    assert run(0, c_lv, applyo(a_lv, b_lv, c_lv)) == (cons(a_lv, b_lv),)

    # Non-decomposable objects simply fail instead of raising.
    # with pytest.raises(ConsError):
    assert run(0, a_lv, applyo(a_lv, b_lv, object())) == ()
    assert run(0, a_lv, applyo(1, 2, a_lv)) == ()
def test_applyo_object():
    """`applyo` works with the custom `Node`/`Operator` term registrations."""
    x = var()
    assert run(0, x, applyo(Add, (1, 2, 3), x)) == (add(1, 2, 3),)
    assert run(0, x, applyo(x, (1, 2, 3), add(1, 2, 3))) == (Add,)
    assert run(0, x, applyo(Add, x, add(1, 2, 3))) == ((1, 2, 3),)
def test_unifiable_with_term():
    """`unifiable_with_term` makes `Node` objects unifiable and reifiable."""
    add = Operator("add")
    t = Node(add, (1, 2))

    assert arguments(t) == (1, 2)
    assert operator(t) == add
    # `term` reconstructs an equal object from its operator/arguments.
    assert term(operator(t), arguments(t)) == t

    x = var()
    s = unify(Node(add, (1, x)), Node(add, (1, 2)), {})
    assert s == {x: 2}
    assert reify(Node(add, (1, x)), s) == Node(add, (1, 2))
================================================
FILE: tests/test_util.py
================================================
from pytest import raises
from kanren.util import (
FlexibleSet,
dicthash,
groupsizes,
hashable,
intersection,
multihash,
unique,
)
def test_hashable():
    """`hashable` reports whether an object supports `hash`."""
    assert hashable(2)
    assert hashable((2, 3))
    assert not hashable({1: 2})
    # A tuple containing a dict is itself unhashable.
    assert not hashable((1, {2: 3}))
def test_unique():
    """`unique` preserves order and drops later duplicates."""
    assert tuple(unique((1, 2, 3))) == (1, 2, 3)
    assert tuple(unique((1, 2, 1, 3))) == (1, 2, 3)
def test_unique_dict():
    """`unique` can deduplicate dicts via the `dicthash` key function."""
    assert tuple(unique(({1: 2}, {2: 3}), key=dicthash)) == ({1: 2}, {2: 3})
    assert tuple(unique(({1: 2}, {1: 2}), key=dicthash)) == ({1: 2},)
def test_unique_not_hashable():
    """`unique` tolerates unhashable elements without raising."""
    # NOTE(review): this only asserts the result is truthy (non-empty); it
    # does not pin down whether unhashable duplicates are dropped or kept.
    assert tuple(unique(([1], [1])))
def test_multihash():
    """`multihash` yields an `int` even for unhashable/compound inputs."""
    inputs = 2, (1, 2), [1, 2], {1: 2}, (1, [2]), slice(1, 2)
    assert all(isinstance(multihash(i), int) for i in inputs)
def test_intersection():
    """`intersection` yields the elements common to all given sequences."""
    a, b, c = (1, 2, 3, 4), (2, 3, 4, 5), (3, 4, 5, 6)
    assert tuple(intersection(a, b, c)) == (3, 4)
def test_groupsizes():
    """`groupsizes(total, n)` enumerates positive compositions of `total` into `n` parts."""
    assert set(groupsizes(4, 2)) == {(1, 3), (2, 2), (3, 1)}
    assert set(groupsizes(5, 2)) == {(1, 4), (2, 3), (3, 2), (4, 1)}
    assert set(groupsizes(4, 1)) == {(4,)}
    assert set(groupsizes(4, 4)) == {(1, 1, 1, 1)}
def test_flexibleset():
    """Exercise `FlexibleSet`: a set-like container that also accepts unhashable items."""
    # With hashable-only contents it behaves like a plain `set`.
    test_set = set([1, 2, 4])
    test_fs = FlexibleSet([1, 2, 4])

    assert test_fs.set == test_set
    assert test_fs.list == []

    test_fs.discard(3)
    test_set.discard(3)
    assert test_fs == test_set

    test_fs.discard(2)
    test_set.discard(2)

    with raises(KeyError):
        test_set.remove(3)
    with raises(KeyError):
        test_fs.remove(3)

    res_fs = test_fs.pop()
    res_set = test_set.pop()
    assert res_fs == res_set and test_fs == test_set

    # Unhashable items ([3, 4] and {"a"}) go into the `.list` backing store.
    test_fs_2 = FlexibleSet([1, 2, [3, 4], {"a"}])
    assert len(test_fs_2) == 4
    assert test_fs_2.set == {1, 2}
    assert test_fs_2.list == [[3, 4], {"a"}]

    # Re-adding existing items (hashable or not) is a no-op.
    test_fs_2.add(2)
    test_fs_2.add([3, 4])
    test_fs_2.add({"a"})
    assert test_fs_2.set == {1, 2}
    assert test_fs_2.list == [[3, 4], {"a"}]

    assert 1 in test_fs_2
    assert {"a"} in test_fs_2
    assert [3, 4] in test_fs_2
    assert test_fs_2 != test_set

    # `discard` removes by equality from either store and never raises.
    test_fs_2.discard(3)
    test_fs_2.discard([3, 4])
    assert test_fs_2.set == {1, 2}
    assert test_fs_2.list == [{"a"}]

    # `remove` raises `KeyError` for missing items, unlike `discard`.
    with raises(KeyError):
        test_fs_2.remove(3)
    with raises(KeyError):
        test_fs_2.remove([1, 4])

    test_fs_2.remove({"a"})
    assert test_fs_2.set == {1, 2}
    assert test_fs_2.list == []

    # `pop` drains the hashable store before the unhashable one.
    test_fs_2.add([5])
    pop_var = test_fs_2.pop()
    assert pop_var not in test_fs_2.set
    assert test_fs_2.list == [[5]]
    pop_var = test_fs_2.pop()
    assert test_fs_2.set == set()
    assert test_fs_2.list == [[5]]
    assert [5] == test_fs_2.pop()
    assert test_fs_2.set == set()
    assert test_fs_2.list == []
    with raises(KeyError):
        test_fs_2.pop()

    # Equality compares both backing stores.
    assert FlexibleSet([1, 2, [3, 4], {"a"}]) == FlexibleSet([1, 2, [3, 4], {"a"}])
    assert FlexibleSet([1, 2, [3, 4], {"a"}]) != FlexibleSet([1, [3, 4], {"a"}])

    test_fs_3 = FlexibleSet([1, 2, [3, 4], {"a"}])
    test_fs_3.clear()
    assert test_fs_3.set == set()
    assert test_fs_3.list == list()

    test_fs_3 = FlexibleSet([1, 2, [3, 4], {"a"}])
    assert repr(test_fs_3) == "FlexibleSet([1, 2, [3, 4], {'a'}])"
================================================
FILE: tox.ini
================================================
[tox]
install_command = pip install {opts} {packages}
envlist = py35,pypy35,lint
indexserver =
default = https://pypi.python.org/simple
[testenv]
usedevelop = True
commands =
rm -f .coverage
py.test --cov=kanren -vv {posargs:kanren}
deps =
-r{toxinidir}/requirements.txt
coverage
nose
pytest
pytest-cov
whitelist_externals =
rm
[testenv:lint]
deps =
flake8
commands =
flake8 kanren
basepython = python3.5
[testenv:yapf]
# Tox target for autoformatting the code for pep8.
deps =
yapf
commands =
yapf --recursive kanren --in-place
basepython = python3.5
[flake8]
ignore = E731,F811,E712,E127,E126,C901,W503,W504
gitextract_gnvw45a6/ ├── .gitattributes ├── .github/ │ ├── FUNDING.yml │ └── workflows/ │ ├── pypi.yml │ └── tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .pylintrc ├── LICENSE.txt ├── MANIFEST.in ├── Makefile ├── README.md ├── doc/ │ ├── basic.md │ └── graphs.md ├── examples/ │ ├── __init__.py │ ├── account.py │ ├── commutative.py │ ├── corleone.py │ ├── data/ │ │ ├── adjacent-states.txt │ │ └── coastal-states.txt │ ├── states.py │ ├── user_classes.py │ └── zebra-puzzle.py ├── kanren/ │ ├── __init__.py │ ├── assoccomm.py │ ├── constraints.py │ ├── core.py │ ├── facts.py │ ├── goals.py │ ├── graph.py │ ├── py.typed │ ├── term.py │ └── util.py ├── pyproject.toml ├── pytest.ini ├── release-notes ├── requirements.txt ├── setup.cfg ├── tests/ │ ├── __init__.py │ ├── test_assoccomm.py │ ├── test_constraints.py │ ├── test_core.py │ ├── test_facts.py │ ├── test_goals.py │ ├── test_graph.py │ ├── test_sudoku.py │ ├── test_term.py │ └── test_util.py └── tox.ini
SYMBOL INDEX (257 symbols across 20 files)
FILE: examples/account.py
class Account (line 1) | class Account(object):
method __init__ (line 2) | def __init__(self, first, last, id, balance):
method info (line 8) | def info(self):
method __eq__ (line 11) | def __eq__(self, other):
method __hash__ (line 16) | def __hash__(self):
method __str__ (line 19) | def __str__(self):
FILE: examples/corleone.py
function parent (line 51) | def parent(p, child):
function grandparent (line 59) | def grandparent(gparent, child):
function sibling (line 72) | def sibling(a, b):
FILE: examples/zebra-puzzle.py
class House (line 15) | class House:
function righto (line 23) | def righto(right, left, houses):
function nexto (line 29) | def nexto(a, b, houses):
FILE: kanren/assoccomm.py
function flatten_assoc_args (line 52) | def flatten_assoc_args(op_predicate, items):
function assoc_args (line 64) | def assoc_args(rator, rands, n, ctor=None):
function eq_assoc_args (line 90) | def eq_assoc_args(
function eq_assoc (line 177) | def eq_assoc(u, v, n=None, op_predicate=associative, null_type=etuple):
function eq_comm (line 197) | def eq_comm(u, v, op_predicate=commutative, null_type=etuple):
function assoc_flatten (line 218) | def assoc_flatten(a, a_flat):
function eq_assoccomm (line 240) | def eq_assoccomm(u, v, null_type=etuple):
FILE: kanren/constraints.py
class ConstraintStore (line 16) | class ConstraintStore(ABC):
method __init__ (line 32) | def __init__(self, lvar_constraints=None):
method pre_unify_check (line 37) | def pre_unify_check(self, lvar_map, lvar=None, value=None):
method post_unify_check (line 42) | def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=...
method add (line 49) | def add(self, lvar, lvar_constraint, **kwargs):
method constraints_str (line 56) | def constraints_str(self, lvar):
method copy (line 63) | def copy(self):
method __contains__ (line 68) | def __contains__(self, lvar):
method __eq__ (line 71) | def __eq__(self, other):
method __repr__ (line 78) | def __repr__(self):
class ConstrainedState (line 82) | class ConstrainedState(UserDict):
method __init__ (line 87) | def __init__(self, *s, constraints=None):
method pre_unify_checks (line 91) | def pre_unify_checks(self, lvar, value):
method post_unify_checks (line 98) | def post_unify_checks(self, lvar_map, lvar, value):
method copy (line 115) | def copy(self, data=None):
method __eq__ (line 122) | def __eq__(self, other):
method __repr__ (line 131) | def __repr__(self):
function unify_ConstrainedState (line 135) | def unify_ConstrainedState(u, v, S):
class ConstrainedVar (line 149) | class ConstrainedVar(Var):
method __init__ (line 158) | def __init__(self, var, S):
method __repr__ (line 163) | def __repr__(self):
method __eq__ (line 175) | def __eq__(self, other):
method __hash__ (line 183) | def __hash__(self):
function _reify_ConstrainedState (line 187) | def _reify_ConstrainedState(u, S):
class DisequalityStore (line 199) | class DisequalityStore(ConstraintStore):
method __init__ (line 204) | def __init__(self, lvar_constraints=None):
method post_unify_check (line 207) | def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=...
method pre_unify_check (line 238) | def pre_unify_check(self, lvar_map, lvar=None, value=None):
function neq (line 242) | def neq(u, v):
class PredicateStore (line 279) | class PredicateStore(ConstraintStore, ABC):
method cparam_type_check (line 292) | def cparam_type_check(self, lvt):
method constraint_check (line 297) | def constraint_check(self, lv, lvt):
method constraint_isground (line 305) | def constraint_isground(self, lv, lvar_map):
method post_unify_check (line 309) | def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=...
method pre_unify_check (line 363) | def pre_unify_check(self, lvar_map, lvar=None, value=None):
class TypeStore (line 367) | class TypeStore(PredicateStore):
method __init__ (line 374) | def __init__(self, lvar_constraints=None):
method add (line 377) | def add(self, lvt, cparams):
method cparam_type_check (line 386) | def cparam_type_check(self, x):
method constraint_check (line 389) | def constraint_check(self, x, cx):
method constraint_isground (line 392) | def constraint_isground(self, lv, lvar_map):
function typeo (line 396) | def typeo(u, u_type):
class IsinstanceStore (line 429) | class IsinstanceStore(PredicateStore):
method __init__ (line 437) | def __init__(self, lvar_constraints=None):
method cparam_type_check (line 443) | def cparam_type_check(self, lvt):
method constraint_check (line 446) | def constraint_check(self, lv, lvt):
method constraint_isground (line 449) | def constraint_isground(self, lv, lvar_map):
function isinstanceo (line 453) | def isinstanceo(u, u_type):
FILE: kanren/core.py
function fail (line 29) | def fail(s: StateType) -> Iterator[StateType]:
function succeed (line 33) | def succeed(s: StateType) -> Iterator[StateType]:
function eq (line 37) | def eq(u: Any, v: Any) -> GoalType:
function ldisj_seq (line 55) | def ldisj_seq(goals: Iterable[GoalType]) -> GoalType:
function bind (line 76) | def bind(z: StateStreamType, g: GoalType) -> StateStreamType:
function lconj_seq (line 83) | def lconj_seq(goals: Iterable[GoalType]) -> GoalType:
function ldisj (line 109) | def ldisj(*goals: Union[GoalType, Iterable[GoalType]]) -> GoalType:
function lconj (line 117) | def lconj(*goals: Union[GoalType, Iterable[GoalType]]) -> GoalType:
function conde (line 125) | def conde(
function ground_order_key (line 141) | def ground_order_key(S: StateType, x: Any) -> Literal[-1, 0, 1, 2]:
function ground_order (line 152) | def ground_order(in_args: Any, out_args: Any) -> GoalType:
function ifa (line 172) | def ifa(g1: GoalType, g2: GoalType) -> GoalType:
function Zzz (line 188) | def Zzz(gctor: Callable[[Any], GoalType], *args, **kwargs) -> GoalType:
function run (line 197) | def run(
function dbgo (line 243) | def dbgo(*args: Any, msg: Optional[Any] = None) -> GoalType: # pragma: ...
FILE: kanren/facts.py
class Relation (line 7) | class Relation(object):
method __init__ (line 10) | def __init__(self, name=None):
method add_fact (line 18) | def add_fact(self, *inputs):
method __call__ (line 35) | def __call__(self, *args):
method __str__ (line 75) | def __str__(self):
method __repr__ (line 78) | def __repr__(self):
function fact (line 82) | def fact(rel, *args):
function facts (line 97) | def facts(rel, *lists):
FILE: kanren/goals.py
function heado (line 15) | def heado(head, coll):
function tailo (line 26) | def tailo(tail, coll):
function conso (line 37) | def conso(h, t, r):
function nullo (line 42) | def nullo(*args, refs=None, default_ConsNull=list):
function itero (line 103) | def itero(lst, nullo_refs=None, default_ConsNull=list):
function membero (line 125) | def membero(x, ls):
function appendo (line 141) | def appendo(lst, s, out, default_ConsNull=list):
function rembero (line 180) | def rembero(x, lst, o, default_ConsNull=list):
function permuteo (line 214) | def permuteo(a, b, inner_eq=eq, default_ConsNull=list, no_ident=False):
FILE: kanren/graph.py
function mapo (line 11) | def mapo(relation, a, b, null_type=list, null_res=True, first=True):
function map_anyo (line 28) | def map_anyo(
function vararg_success (line 83) | def vararg_success(*args):
function eq_length (line 87) | def eq_length(u, v, default_ConsNull=list):
function reduceo (line 93) | def reduceo(relation, in_term, out_term, *args, **kwargs):
function walko (line 148) | def walko(
function term_walko (line 215) | def term_walko(
FILE: kanren/term.py
function applyo (line 14) | def applyo(o_rator, o_rands, obj):
function term_Sequence (line 59) | def term_Sequence(rator, rands):
function unifiable_with_term (line 66) | def unifiable_with_term(cls):
function reify_term (line 72) | def reify_term(obj, s):
function unify_term (line 80) | def unify_term(u, v, s):
FILE: kanren/util.py
class FlexibleSet (line 9) | class FlexibleSet(MutableSet):
method __init__ (line 14) | def __init__(self, iterable=None):
method add (line 23) | def add(self, item):
method discard (line 32) | def discard(self, item):
method clear (line 38) | def clear(self):
method pop (line 42) | def pop(self):
method remove (line 51) | def remove(self, item):
method copy (line 60) | def copy(self):
method __le__ (line 66) | def __le__(self, other):
method __ge__ (line 69) | def __ge__(self, other):
method __iter__ (line 72) | def __iter__(self):
method __contains__ (line 75) | def __contains__(self, value):
method __len__ (line 81) | def __len__(self):
method __eq__ (line 84) | def __eq__(self, other):
method __repr__ (line 92) | def __repr__(self):
function hashable (line 96) | def hashable(x):
function dicthash (line 104) | def dicthash(d):
function make_hashable (line 108) | def make_hashable(x):
function multihash (line 121) | def multihash(x):
function unique (line 125) | def unique(seq, key=lambda x: x):
function intersection (line 144) | def intersection(*seqs):
function groupsizes (line 148) | def groupsizes(total, len):
function pprint (line 163) | def pprint(g): # pragma: no cover
function index (line 174) | def index(tup, ind):
FILE: tests/test_assoccomm.py
function clear_assoccomm (line 26) | def clear_assoccomm():
class Node (line 40) | class Node(object):
method __init__ (line 41) | def __init__(self, op, args):
method __eq__ (line 45) | def __eq__(self, other):
method __hash__ (line 52) | def __hash__(self):
method __str__ (line 55) | def __str__(self):
class Operator (line 61) | class Operator(object):
method __init__ (line 62) | def __init__(self, name):
function add (line 70) | def add(*args):
function mul (line 74) | def mul(*args):
function term_Operator (line 79) | def term_Operator(op, args):
function arguments_Node (line 84) | def arguments_Node(n):
function operator_Node (line 89) | def operator_Node(n):
function results (line 93) | def results(g, s=None):
function test_eq_comm (line 99) | def test_eq_comm():
function test_eq_comm_object (line 189) | def test_eq_comm_object():
function test_flatten_assoc_args (line 202) | def test_flatten_assoc_args():
function test_assoc_args (line 221) | def test_assoc_args():
function test_eq_assoc_args (line 249) | def test_eq_assoc_args():
function test_eq_assoc (line 335) | def test_eq_assoc():
function test_assoc_flatten (line 407) | def test_assoc_flatten():
function test_eq_assoccomm (line 443) | def test_eq_assoccomm():
function test_assoccomm_algebra (line 536) | def test_assoccomm_algebra():
function test_assoccomm_objects (line 554) | def test_assoccomm_objects():
FILE: tests/test_constraints.py
function test_ConstrainedState (line 21) | def test_ConstrainedState():
function test_reify (line 72) | def test_reify():
function test_ConstraintStore (line 91) | def test_ConstraintStore():
function test_ConstrainedVar (line 99) | def test_ConstrainedVar():
function test_disequality_basic (line 113) | def test_disequality_basic():
function test_disequality (line 162) | def test_disequality():
function test_typeo_basic (line 212) | def test_typeo_basic():
function test_typeo (line 231) | def test_typeo():
function test_instanceo_basic (line 279) | def test_instanceo_basic():
function test_instanceo (line 295) | def test_instanceo():
FILE: tests/test_core.py
function results (line 25) | def results(g, s=None):
function test_eq (line 31) | def test_eq():
function test_lconj_basics (line 37) | def test_lconj_basics():
function test_ldisj_basics (line 72) | def test_ldisj_basics():
function test_conde_basics (line 101) | def test_conde_basics():
function test_lany (line 139) | def test_lany():
function test_lall (line 145) | def test_lall():
function test_conde (line 153) | def test_conde():
function test_short_circuit (line 166) | def test_short_circuit():
function test_run (line 174) | def test_run():
function test_run_output_reify (line 186) | def test_run_output_reify():
function test_lanyseq (line 191) | def test_lanyseq():
function test_lall_errors (line 202) | def test_lall_errors():
function test_dict (line 216) | def test_dict():
function test_ifa (line 221) | def test_ifa():
function test_ground_order (line 246) | def test_ground_order():
FILE: tests/test_facts.py
function test_relation (line 7) | def test_relation():
function test_fact (line 30) | def test_fact():
function test_unify_variable_with_itself_should_not_unify (line 41) | def test_unify_variable_with_itself_should_not_unify():
function test_unify_variable_with_itself_should_unify (line 50) | def test_unify_variable_with_itself_should_unify():
function test_unify_tuple (line 59) | def test_unify_tuple():
FILE: tests/test_goals.py
function results (line 20) | def results(g, s=None):
function test_heado (line 26) | def test_heado():
function test_tailo (line 35) | def test_tailo():
function test_conso (line 45) | def test_conso():
function test_nullo_itero (line 64) | def test_nullo_itero():
function test_membero (line 102) | def test_membero():
function test_uneval_membero (line 127) | def test_uneval_membero():
function test_appendo (line 139) | def test_appendo():
function test_appendo_reorder (line 180) | def test_appendo_reorder():
function test_rembero (line 210) | def test_rembero():
function test_permuteo (line 227) | def test_permuteo():
FILE: tests/test_graph.py
class OrderedFunction (line 17) | class OrderedFunction(object):
method __init__ (line 18) | def __init__(self, func):
method __call__ (line 21) | def __call__(self, *args, **kwargs):
method __name__ (line 25) | def __name__(self):
method __lt__ (line 28) | def __lt__(self, other):
method __gt__ (line 31) | def __gt__(self, other):
method __repr__ (line 34) | def __repr__(self):
function single_math_reduceo (line 56) | def single_math_reduceo(expanded_term, reduced_term):
function test_basics (line 82) | def test_basics():
function test_reduceo (line 90) | def test_reduceo():
function test_mapo (line 112) | def test_mapo():
function test_eq_length (line 136) | def test_eq_length():
function test_map_anyo_types (line 159) | def test_map_anyo_types():
function test_map_anyo_misc (line 178) | def test_map_anyo_misc():
function test_map_anyo (line 265) | def test_map_anyo(test_input, test_output):
function test_map_anyo_reverse (line 290) | def test_map_anyo_reverse():
function test_walko_misc (line 325) | def test_walko_misc():
function test_walko (line 412) | def test_walko(test_input, test_output):
function test_walko_reverse (line 437) | def test_walko_reverse():
FILE: tests/test_sudoku.py
function get_rows (line 16) | def get_rows(board):
function get_columns (line 20) | def get_columns(rows):
function get_square (line 24) | def get_square(rows, x, y):
function get_squares (line 28) | def get_squares(rows):
function vars (line 32) | def vars(hints):
function all_numbers (line 42) | def all_numbers(coll):
function sudoku_solver (line 46) | def sudoku_solver(hints):
function test_missing_one_entry (line 61) | def test_missing_one_entry():
function test_missing_complex_board (line 88) | def test_missing_complex_board():
function test_unsolvable (line 115) | def test_unsolvable():
function test_many_missing_elements (line 132) | def test_many_missing_elements():
function test_websudoku_easy (line 159) | def test_websudoku_easy():
FILE: tests/test_term.py
class Node (line 10) | class Node(object):
method __init__ (line 11) | def __init__(self, op, args):
method __eq__ (line 15) | def __eq__(self, other):
method __hash__ (line 22) | def __hash__(self):
method __str__ (line 25) | def __str__(self):
class Operator (line 31) | class Operator(object):
method __init__ (line 32) | def __init__(self, name):
function add (line 40) | def add(*args):
function mul (line 44) | def mul(*args):
class Op (line 48) | class Op(object):
method __init__ (line 49) | def __init__(self, name):
function arguments_Node (line 54) | def arguments_Node(t):
function operator_Node (line 59) | def operator_Node(t):
function term_Op (line 64) | def term_Op(op, args):
function test_applyo (line 68) | def test_applyo():
function test_applyo_object (line 104) | def test_applyo_object():
function test_unifiable_with_term (line 111) | def test_unifiable_with_term():
FILE: tests/test_util.py
function test_hashable (line 14) | def test_hashable():
function test_unique (line 21) | def test_unique():
function test_unique_dict (line 26) | def test_unique_dict():
function test_unique_not_hashable (line 31) | def test_unique_not_hashable():
function test_multihash (line 35) | def test_multihash():
function test_intersection (line 40) | def test_intersection():
function test_groupsizes (line 46) | def test_groupsizes():
function test_flexibleset (line 53) | def test_flexibleset():
Condensed preview — 48 files, each showing path, character count, and a content snippet. Download the .json file or copy it to your clipboard to get the full structured content (203K chars).
[
{
"path": ".gitattributes",
"chars": 32,
"preview": "kanren/_version.py export-subst\n"
},
{
"path": ".github/FUNDING.yml",
"chars": 25,
"preview": "github: [brandonwillard]\n"
},
{
"path": ".github/workflows/pypi.yml",
"chars": 1843,
"preview": "name: PyPI\non:\n push:\n branches:\n - main\n - auto-release\n pull_request:\n branches: [main]\n release:\n "
},
{
"path": ".github/workflows/tests.yml",
"chars": 2947,
"preview": "name: Tests\n\non:\n push:\n branches:\n - main\n pull_request:\n branches:\n - main\n\n# Cancels all previous w"
},
{
"path": ".gitignore",
"chars": 2658,
"preview": "# Created by https://www.gitignore.io/api/vim,emacs,python\n# Edit at https://www.gitignore.io/?templates=vim,emacs,pytho"
},
{
"path": ".pre-commit-config.yaml",
"chars": 1148,
"preview": "exclude: |\n (?x)^(\n versioneer\\.py|\n kanren/_version\\.py|\n doc/.*|\n bin/.*\n )$\nrepos:\n"
},
{
"path": ".pylintrc",
"chars": 11505,
"preview": "[MASTER]\n# Use multiple processes to speed up Pylint.\njobs=0\n\n# Allow loading of arbitrary C extensions. Extensions are "
},
{
"path": "LICENSE.txt",
"chars": 1531,
"preview": "Copyright (c) 2019 Brandon T. Willard\nCopyright (c) 2012 Matthew Rocklin\n\nAll rights reserved.\n\nRedistribution and use i"
},
{
"path": "MANIFEST.in",
"chars": 458,
"preview": "include LICENSE.txt\ninclude pyproject.toml\ninclude README.md\ninclude setup.cfg\n\ngraft kanren\n\nprune .github\nprune doc\npr"
},
{
"path": "Makefile",
"chars": 2509,
"preview": ".PHONY: help venv conda docker docstyle format style black test lint check coverage pypi\n.DEFAULT_GOAL = help\n\nPYTHON = "
},
{
"path": "README.md",
"chars": 6329,
"preview": "# `kanren`\n\n[](https://travis-ci.org/pythologi"
},
{
"path": "doc/basic.md",
"chars": 5635,
"preview": "# Basics of `miniKanren`\n\nThe design of `miniKanren` is simple. It orchestrates only a few basic operations and yields "
},
{
"path": "doc/graphs.md",
"chars": 9241,
"preview": "# Relational Graph Manipulation\n\nIn this document, we show how `kanren` can be used to perform symbolic algebra operatio"
},
{
"path": "examples/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "examples/account.py",
"chars": 585,
"preview": "class Account(object):\n def __init__(self, first, last, id, balance):\n self.first = first\n self.last = "
},
{
"path": "examples/commutative.py",
"chars": 638,
"preview": "from kanren import fact, run, var\nfrom kanren.assoccomm import associative, commutative\nfrom kanren.assoccomm import eq_"
},
{
"path": "examples/corleone.py",
"chars": 1900,
"preview": "\"\"\"\nFamily relationships from The Godfather Translated from the core.logic example\nfound in \"The Magical Island of Kanre"
},
{
"path": "examples/data/adjacent-states.txt",
"chars": 1020,
"preview": "# Author Gregg Lind\n# License: Public Domain. I would love to hear about any projects you use\n# if it for though!\n# "
},
{
"path": "examples/data/coastal-states.txt",
"chars": 63,
"preview": "WA,OR,CA,TX,LA,MI,AL,GA,FL,SC,NC,VI,MD,DW,NJ,NY,CT,RI,MA,MN,NH\n"
},
{
"path": "examples/states.py",
"chars": 1522,
"preview": "\"\"\"\nAn example showing how to use facts and relations to store data and query data\n\nThis example builds a small database"
},
{
"path": "examples/user_classes.py",
"chars": 1290,
"preview": "from operator import add, gt, sub\n\nfrom examples.account import Account\nfrom kanren import eq, membero, run, unifiable, "
},
{
"path": "examples/zebra-puzzle.py",
"chars": 3009,
"preview": "\"\"\"\nZebra puzzle as published in Life International in 1962.\nhttps://en.wikipedia.org/wiki/Zebra_Puzzle\n\"\"\"\nfrom datacla"
},
{
"path": "kanren/__init__.py",
"chars": 543,
"preview": "# flake8: noqa\n\"\"\"kanren is a Python library for logic and relational programming.\"\"\"\nfrom importlib.metadata import ver"
},
{
"path": "kanren/assoccomm.py",
"chars": 8899,
"preview": "\"\"\"Functions for associative and commutative unification.\n\nThis module provides goals for associative and commutative un"
},
{
"path": "kanren/constraints.py",
"chars": 15582,
"preview": "import weakref\nfrom abc import ABC, abstractmethod\nfrom collections import UserDict\nfrom collections.abc import Mapping\n"
},
{
"path": "kanren/core.py",
"chars": 6756,
"preview": "from collections.abc import Sequence\nfrom functools import partial, reduce\nfrom itertools import tee\nfrom operator impor"
},
{
"path": "kanren/facts.py",
"chars": 2802,
"preview": "from toolz import merge\nfrom unification import reify, unify\n\nfrom .util import intersection\n\n\nclass Relation(object):\n "
},
{
"path": "kanren/goals.py",
"chars": 9812,
"preview": "from collections import Counter\nfrom collections.abc import Sequence\nfrom functools import partial\nfrom itertools import"
},
{
"path": "kanren/graph.py",
"chars": 9084,
"preview": "from functools import partial\n\nfrom etuples import etuple\nfrom unification import isvar, reify, var\n\nfrom .core import Z"
},
{
"path": "kanren/py.typed",
"chars": 0,
"preview": ""
},
{
"path": "kanren/term.py",
"chars": 2819,
"preview": "from collections.abc import Mapping, Sequence\n\nfrom cons.core import ConsError, cons\nfrom etuples import apply as term\nf"
},
{
"path": "kanren/util.py",
"chars": 4575,
"preview": "from collections import namedtuple\nfrom collections.abc import Hashable, Iterable, Mapping, MutableSet, Set\nfrom itertoo"
},
{
"path": "pyproject.toml",
"chars": 1520,
"preview": "[build-system]\nrequires = [\"setuptools>=77.0.0\", \"setuptools-scm[toml]\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[proje"
},
{
"path": "pytest.ini",
"chars": 169,
"preview": "# content of pytest.ini\n[pytest]\naddopts = --doctest-modules\nnorecursedirs = examples\ntestpaths = tests\ndoctest_optionfl"
},
{
"path": "release-notes",
"chars": 455,
"preview": "New in version 0.2\n\n* Python 3 support\n* Dictionary unification\n* Use multiple dispatch to extend unify, reify, is"
},
{
"path": "requirements.txt",
"chars": 207,
"preview": "-e ./\ncoveralls\npydocstyle>=3.0.0\npytest>=5.0.0\npytest-cov>=2.6.1\npytest-html>=1.20.0\npylint>=2.3.1\nblack>=19.3b0; platf"
},
{
"path": "setup.cfg",
"chars": 1292,
"preview": "[pydocstyle]\n# Ignore errors for missing docstrings.\n# Ignore D202 (No blank lines allowed after function docstring)\n# d"
},
{
"path": "tests/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "tests/test_assoccomm.py",
"chars": 15707,
"preview": "from collections.abc import Sequence\nfrom copy import copy\n\nimport pytest\nfrom cons import cons\nfrom etuples.core import"
},
{
"path": "tests/test_constraints.py",
"chars": 12091,
"preview": "from itertools import permutations\n\nfrom cons import cons\nfrom pytest import raises\nfrom unification import reify, unify"
},
{
"path": "tests/test_core.py",
"chars": 6045,
"preview": "from collections.abc import Iterator\nfrom itertools import count\n\nfrom cons import cons\nfrom pytest import raises\nfrom u"
},
{
"path": "tests/test_facts.py",
"chars": 1863,
"preview": "from unification import var\n\nfrom kanren.core import conde, run\nfrom kanren.facts import Relation, fact, facts\n\n\ndef tes"
},
{
"path": "tests/test_goals.py",
"chars": 10726,
"preview": "import pytest\nfrom cons import cons\nfrom cons.core import ConsPair\nfrom unification import isvar, unify, var\n\nfrom kanre"
},
{
"path": "tests/test_graph.py",
"chars": 13925,
"preview": "from functools import partial\nfrom math import exp, log\nfrom numbers import Real\nfrom operator import add, mul\n\nimport p"
},
{
"path": "tests/test_sudoku.py",
"chars": 4914,
"preview": "\"\"\"\nBased off\nhttps://github.com/holtchesley/embedded-logic/blob/master/kanren/sudoku.ipynb\n\"\"\"\nimport pytest\nfrom unifi"
},
{
"path": "tests/test_term.py",
"chars": 3086,
"preview": "from cons import cons\nfrom etuples import etuple\nfrom unification import reify, unify, var\n\nfrom kanren.core import run\n"
},
{
"path": "tests/test_util.py",
"chars": 3316,
"preview": "from pytest import raises\n\nfrom kanren.util import (\n FlexibleSet,\n dicthash,\n groupsizes,\n hashable,\n in"
},
{
"path": "tox.ini",
"chars": 666,
"preview": "[tox]\ninstall_command = pip install {opts} {packages}\nenvlist = py35,pypy35,lint\nindexserver =\n default = https://pyp"
}
]
About this extraction
This page contains the full source code of the pythological/kanren GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 48 files (188.2 KB), approximately 59.5k tokens, and a symbol index with 257 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.