[
  {
    "path": ".coveragerc",
    "content": "[run]\nbranch = True\nsource = dask_image\n[report]\nexclude_lines =\n    # Include the no cover pragma as it needs to be listed explicitly when\n    # using exclude_lines.\n    # ( http://coverage.readthedocs.io/en/coverage-4.1/excluding.html#advanced-exclusion )\n    pragma: no cover\n\n    # Ignore coverage of code that requires the module to be executed.\n    if __name__ == .__main__.:\n\n    # Ignore continue statement in code as it can't be detected as covered\n    # due to an optimization by the Python interpreter. See coverage issue\n    # ( https://bitbucket.org/ned/coveragepy/issue/198/continue-marked-as-not-covered )\n    # and Python issue ( http://bugs.python.org/issue2506 ).\n    continue\nomit =\n    */python?.?/*\n    */site-packages/*\n    */eggs/*\n    */.eggs/*\n    *tests/*\n    */_version.py\n    */_vendor/*\n    */dispatch/*\n"
  },
  {
    "path": ".coveralls.yml",
    "content": "repo_token: mu5JxVQy1FJSQvhczAzyHvaXx4qfHhF1R\n"
  },
  {
    "path": ".editorconfig",
    "content": "# http://editorconfig.org\n\nroot = true\n\n[*]\nindent_style = space\nindent_size = 4\ntrim_trailing_whitespace = true\ninsert_final_newline = true\ncharset = utf-8\nend_of_line = lf\n\n[*.bat]\nindent_style = tab\nend_of_line = crlf\n\n[LICENSE]\ninsert_final_newline = false\n\n[Makefile]\nindent_style = tab\n"
  },
  {
    "path": ".gitattributes",
    "content": "dask_image/_version.py export-subst\n\n*.bat text eol=crlf\n*.sh text eol=lf\n*.yaml text eol=lf\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE.md",
    "content": "* dask-image version:\n* Python version:\n* Operating System:\n\n### Description\n\nDescribe what you were trying to get done.\nTell us what happened, what went wrong, and what you expected to happen.\n\n### What I Did\n\n```\nPaste the command(s) you ran and the output.\nIf there was a crash, please include the traceback here.\n```\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "Before you submit a pull request, check that it meets these guidelines:\n\n1. The pull request should include tests.\n2. If the pull request adds functionality, the docs should be updated. Put\n   your new functionality into a function with a docstring, and add the\n   feature to the list in README.rst.\n3. The pull request should pass all our continuous integration checks before it is merged.\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "# Set update schedule for GitHub Actions\n\nversion: 2\nupdates:\n  - package-ecosystem: \"github-actions\"\n    directory: \"/\"\n    schedule:\n      # Check for updates to GitHub Actions every weekday\n      interval: \"weekly\"\n"
  },
  {
    "path": ".github/workflows/test_and_deploy.yml",
    "content": "name: test_and_deploy\n\non:\n  push:\n    branches:\n      - main\n    tags:\n      - \"v*\" # Push events to matching v*, i.e. v1.0, v20.15.10\n  pull_request: null\n  workflow_dispatch: null\n\njobs:\n  test:\n    runs-on: ${{ matrix.os }}\n    strategy:\n      fail-fast: true\n      matrix:\n        os: [\"windows-latest\", \"ubuntu-latest\", \"macos-latest\"]\n        python-version: [\"3.9\", \"3.10\", \"3.11\", \"3.12\"]\n\n    steps:\n      - name: Checkout source\n        uses: actions/checkout@v6\n\n      - name: Setup Conda Environment\n        uses: conda-incubator/setup-miniconda@v4\n        with:\n          python-version: ${{ matrix.python-version }}\n          environment-file: continuous_integration/environment-${{ matrix.python-version }}.yml\n          activate-environment: dask-image-testenv\n          auto-activate-base: false\n\n      - name: Install dask-image\n        shell: bash -l {0}\n        run: |\n          conda activate dask-image-testenv\n          python -m pip install -e .[dataframe]\n          conda list\n\n      - name: Run tests\n        shell: bash -l {0}\n        run: pytest -v --cov=dask_image --cov-report lcov\n\n      - name: Coveralls Parallel\n        uses: coverallsapp/github-action@v2.3.7\n        with:\n          github-token: ${{ secrets.github_token }}\n          flag-name: run-${{ matrix.test_number }}\n          parallel: true\n          path-to-lcov: coverage.lcov\n\n  test-minimal:\n    # Verify dask-image works without the optional `dataframe` extras\n    # (i.e. 
without pandas and dask[dataframe]).\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout source\n        uses: actions/checkout@v6\n\n      - name: Set up Python\n        uses: actions/setup-python@v6\n        with:\n          python-version: \"3.12\"\n\n      - name: Install dask-image without dataframe extras\n        run: |\n          python -m pip install --upgrade pip\n          python -m pip install -e .[test]\n\n      - name: Run tests (find_objects tests are skipped automatically)\n        run: pytest -v\n\n  coveralls:\n    needs: test\n    runs-on: ubuntu-latest\n    steps:\n    - name: Coveralls Finished\n      uses: coverallsapp/github-action@v2.3.7\n      with:\n        github-token: ${{ secrets.github_token }}\n        parallel-finished: true\n\n  deploy:\n    # This will upload a Python Package using Twine when a release is created\n    # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries\n    #\n    # This job will run when you have tagged a commit, starting with \"v*\"\n    # or created a release in GitHub which includes a tag starting with \"v*\"\n    # and requires that you have put your twine API key in your\n    # github secrets (see readme for details)\n    needs: [test]\n    runs-on: ubuntu-latest\n    if: contains(github.ref, 'tags')\n    steps:\n    - uses: actions/checkout@v6\n    - name: Set up Python\n      uses: actions/setup-python@v6\n      with:\n        python-version: '3.x'\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install build twine\n    - name: Build and publish\n      env:\n        TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}\n        TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}\n      run: |\n        python -m build\n        twine upload dist/*\n"
  },
  {
    "path": ".gitignore",
    "content": "# setuptools-scm dynamically generated version\ndask_image/_version.py\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\n\n# Sphinx documentation\ndocs/_build/\ndocs/dask_image*.rst\n\n# PyBuilder\ntarget/\n\n# pyenv python configuration file\n.python-version\n"
  },
  {
    "path": ".readthedocs.yml",
    "content": "version: 2\n\nbuild:\n  os: \"ubuntu-22.04\"\n  tools:\n    python: \"mambaforge-4.10\"\n  jobs:\n    pre_install:\n      # Avoid `git` treating the directory is dirty due to RTD changes.\n      # ref: https://docs.readthedocs.io/en/stable/build-customization.html#avoid-having-a-dirty-git-index\n      - >-\n        git update-index --assume-unchanged\n        continuous_integration/environment-doc.yml\n        docs/conf.py\n      # If we missed any, error and list the changed files.\n      - git diff --stat --exit-code\n\nsphinx:\n  configuration: docs/conf.py\n\nconda:\n  environment: continuous_integration/environment-doc.yml\n\npython:\n  install:\n    - method: pip\n      path: .\n"
  },
  {
    "path": "AUTHORS.rst",
    "content": "=======\nCredits\n=======\n\nDevelopment Lead\n----------------\n\n* John Kirkham `@jakirkham <https://github.com/jakirkham>`_\n\nContributors\n------------\n\nSee the full list of contributors `here <https://github.com/dask/dask-image/graphs/contributors>`_\n"
  },
  {
    "path": "CONTRIBUTING.rst",
    "content": ".. highlight:: shell\n\n============\nContributing\n============\n\nContributions are welcome, and they are greatly appreciated! Every\nlittle bit helps, and credit will always be given.\n\nYou can contribute in many ways:\n\nTypes of Contributions\n----------------------\n\nReport Bugs\n~~~~~~~~~~~\n\nReport bugs at https://github.com/dask/dask-image/issues.\n\nIf you are reporting a bug, please include:\n\n* Your operating system name and version.\n* Any details about your local setup that might be helpful in troubleshooting.\n* Detailed steps to reproduce the bug.\n\nFix Bugs\n~~~~~~~~\n\nLook through the GitHub issues for bugs. Anything tagged with \"bug\"\nand \"help wanted\" is open to whoever wants to implement it.\n\nImplement Features\n~~~~~~~~~~~~~~~~~~\n\nLook through the GitHub issues for features. Anything tagged with \"enhancement\"\nand \"help wanted\" is open to whoever wants to implement it.\n\nWrite Documentation\n~~~~~~~~~~~~~~~~~~~\n\ndask-image could always use more documentation, whether as part of the\nofficial dask-image docs, in docstrings, or even on the web in blog posts,\narticles, and such.\n\nTo build the documentation locally and preview your changes, first set up the\nconda environment for building the dask-image documentation:\n\n.. code-block:: console\n\n    $ conda env create -f continuous_integration/environment-doc.yml\n    $ conda activate dask_image_doc_env\n\nThis conda environment contains dask-image and its dependencies, sphinx,\nand the dask-sphinx-theme.\n\nNext, build the documentation with sphinx:\n\n.. 
code-block:: console\n\n    $ cd dask-image/docs\n    $ make html\n\nNow you can preview the html documentation in your browser by opening the file:\ndask-image/docs/_build/html/index.html\n\nSubmit Feedback\n~~~~~~~~~~~~~~~\n\nThe best way to send feedback is to file an issue at https://github.com/dask/dask-image/issues.\n\nIf you are proposing a feature:\n\n* Explain in detail how it would work.\n* Keep the scope as narrow as possible, to make it easier to implement.\n* Remember that this is a volunteer-driven project, and that contributions\n  are welcome :)\n\nGet Started!\n------------\n\nReady to contribute? Here's how to set up `dask-image` for local development.\n\n1. Fork the `dask-image` repo on GitHub.\n2. Clone your fork locally::\n\n    $ git clone git@github.com:your_name_here/dask-image.git\n    $ cd dask-image\n\n3. Install your local copy into an environment. Assuming you have conda installed, this is how you set up your fork for local development (on Windows drop `source`). Replace `\"<some version>\"` with the Python version used for testing.::\n\n    $ conda create -n dask-image-env python=\"<some version>\"\n    $ source activate dask-image-env\n    $ python -m pip install -e .\n\n4. Create a branch for local development::\n\n    $ git checkout -b name-of-your-bugfix-or-feature\n\n   Now you can make your changes locally.\n\n5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions::\n\n    $ flake8 dask_image tests\n    $ pytest\n\n   To get flake8, just conda install it into your environment.\n\n6. Commit your changes and push your branch to GitHub::\n\n    $ git add .\n    $ git commit -m \"Your detailed description of your changes.\"\n    $ git push origin name-of-your-bugfix-or-feature\n\n7. Submit a pull request through the GitHub website.\n\nPull Request Guidelines\n-----------------------\n\nBefore you submit a pull request, check that it meets these guidelines:\n\n1. 
The pull request should include tests.\n2. If the pull request adds functionality, the docs should be updated. Put\n   your new functionality into a function with a docstring, and add the\n   feature to the list in README.rst.\n3. The pull request should work for all supported Python versions. Check CIs\n   and make sure that the tests pass for all supported Python versions\n   and platforms.\n\nTesting\n-------\n\nRunning tests locally\n~~~~~~~~~~~~~~~~~~~~~\n\nTo setup a local testing environment that matches the test environments we use\nfor our continuous integration services, you can use the ``.yml``\nconda environment files included in the ``continuous_integration`` folder\nin the dask-image repository.\n\nThere is a separate environment file for each supported Python version.\n\nWe will use conda to\n`create an environment from a file`_\n(``conda env create -f name-of-environment-file.yml``).\n\n.. note::\n    If you don't have `conda`_ installed, we recommend downloading and installing it\n    with the conda-forge distribution `Miniforge`_.\n\n.. code-block:: console\n\n    $ conda env create -f continuous_integration/environment-latest.yml\n\nThis command will create a new conda test environment\ncalled ``dask-image-testenv`` with all required dependencies.\n\nNow you can activate your new testing environment with:\n\n.. code-block:: console\n\n    $ conda activate dask-image-testenv\n\nFinally, install the development version of dask-image:\n\n.. code-block:: console\n\n    $ pip install -e \".[test]\"\n\nFor local testing, please run ``pytest`` in the test environment:\n\n.. code-block:: console\n\n    $ pytest\n\n\nTo run a subset of tests, for example all the tests for ndfourier::\n\n    $ pytest tests/test_dask_image/test_ndfourier\n\n.. _create an environment from a file: https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-from-an-environment-yml-file\n.. 
_conda: https://conda.io/en/latest/\n.. _Miniforge: https://conda-forge.org/download/\n\nContinuous integration tests\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nCreating a pull request will automatically run the continuous integration\ntests with Github Actions.\n\nResults from the continuous integration (CI) checks are shown linked at the bottom \nof your pull request, and also in the dask-image GitHub Actions tab:\nhttps://github.com/dask/dask-image/actions\n\nTo edit the CI checks, see the workflow scripts in the repository located in \n``dask-image/.github/workflows``\n\nGPU continuous integration\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nGPU nightly testing is run in the `rapidsai/dask-upstream-testing repo <https://github.com/rapidsai/dask-upstream-testing>`_\n"
  },
  {
    "path": "HISTORY.rst",
    "content": "=======\nHistory\n=======\n\nv2025.11.0 (2025-11-12)\n-----------------------\n\nWe're pleased to announce the release of dask-image v2025.11.0!\n\nHighlights\n\nThe key highlight of this release is that Marvin Albert added a dask-image\nimplementation of the scipy.ndimage.map_coordinates function (#237).\nThere have also been improvements to the documentation.\n\nNew Features\n\n* Implement support for ndimage.map_coordinates (#237)\n\nImprovements\n\n* Use `tifffile.TiffWriter`'s `write` method in `test_cupy_imread` (#398)\n* Recommend dask.array.image.imread over dask-image imread (#410)\n* Expand dask_image.imread.imread docstring (#411)\n* Add spline filter docstrings (#412)\n* Fix typo (#402)\n\nMaintenance\n\n* ReadTheDocs: fix displayed version number in top left corner (#379)\n* Display dev version numbers on ReadTheDocs latest (#380)\n* Update conf.py, sphinx context injection deprecated in ReadTheDocs (#383)\n* fix KeyError: \"None of [Index(['0_x', '1_x', '0_y', '1_y'], dtype='object')] are in the [columns]\" in find_objects (#384)\n* Bump coverallsapp/github-action from 2.3.0 to 2.3.4 (#390)\n* Fix CI test failures (#393)\n* Recommend miniforge conda installer in docs (#395)\n* Update pytest config key (#396)\n* Drop gpuCI & ref dask-upstream-testing (#401)\n* Bump actions/checkout from 4 to 5 (#406)\n* Bump actions/setup-python from 5 to 6 (#408)\n* Maintenance: split ndinterp long __init__.py file functions into separate files (#416)\n\n\n9 authors added to this release (alphabetical)\n\n* `David Haberthür <https://github.com/dask/dask-image/commits?author=habi>`_ - @habi\n* `David Stansby <https://github.com/dask/dask-image/commits?author=dstansby>`_ - @dstansby\n* `dependabot[bot] <https://github.com/dask/dask-image/commits?author=dependabot[bot]>`_ - @dependabot[bot]\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham 
<https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Joshua Gould <https://github.com/dask/dask-image/commits?author=joshua-gould>`_ - @joshua-gould\n* `Kimberly Meechan <https://github.com/dask/dask-image/commits?author=K-Meech>`_ - @K-Meech\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n* `Tom Augspurger <https://github.com/dask/dask-image/commits?author=TomAugspurger>`_ - @TomAugspurger\n\n\n4 reviewers added to this release (alphabetical)\n\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n* `Thomas Robitaille <https://github.com/dask/dask-image/commits?author=astrofrog>`_ - @astrofrog\n\n\n2024.5.0 (2024-05-17)\n----------------------\n\nWe're pleased to announce the release of dask-image 2024.5.0!\n\nHighlights\n\nHighlights of this release include:\n\n* Martin Schorb adding 'rotate', 'spline_filter' and 'spline_filter1d' functions (#213)\n* Erik Holmgren adding functionality to allow wrapping labels over array boundaries (#344), and \n* Christoph Sommer's work allowing aicsimageio and other da.core.Array sub-classes as input arrays (#361)\n\nNew Features\n\n* Add the rotate, spline_filter, and spline_filter1d functions to ndimage (#213)\n* Wrapping labels over array boundaries (#344)\n* Add python 3.12 support (#370)\n\nImprovements\n\n* Relaxed type check of input array, to allow da.core.Array sub-classes… (#361)\n* Update slice index comment to reflect code change (#353)\n\nMaintenance\n\n* Switch to pyproject.toml package setup, replace versioneer with setuptools-scm (#306)\n* Fix cupy pytest errors (#368)\n* Switch to newer GPU CI images (#345)\n* Bump GPU CI to CUDA 11.8 (#348)\n* Maintenance: fix CI test errors (#366)\n* Update CI test 
environments (#367)\n* Additions to release guide and change to release note generation script (#339)\n* Fix typo in pull request template (#347)\n* Workaround for the sphinx version problem in the readthedocs build environment (#354)\n* Pin dask to 2024.4.1 to avoid error during dask.dataframe import with python 3.11.9 (#363)\n* Get rid of distutils dependency -- Depend on newer scipy (#346)\n* Bump actions/checkout from 3 to 4 (#342)\n* Bump actions/setup-python from 4 to 5 (#350)\n* Bump coverallsapp/github-action from 2.2.1 to 2.2.3 (#343)\n* Bump conda-incubator/setup-miniconda from 2 to 3 (#349)\n* Bump coverallsapp/github-action from 2.2.3 to 2.3.0 (#365)\n* Update versioneer to version 0.29 for compatibility with python 3.12 (#357)\n\n\n9 authors added to this release (alphabetical)\n\n* `Charles Blackmon-Luca <https://github.com/dask/dask-image/commits?author=charlesbluca>`_ - @charlesbluca\n* `Christoph Sommer <https://github.com/dask/dask-image/commits?author=sommerc>`_ - @sommerc\n* `dependabot[bot] <https://github.com/dask/dask-image/commits?author=dependabot[bot]>`_ - @dependabot[bot]\n* `Erik Holmgren <https://github.com/dask/dask-image/commits?author=Holmgren825>`_ - @Holmgren825\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Mark Harfouche <https://github.com/dask/dask-image/commits?author=hmaarrfk>`_ - @hmaarrfk\n* `Martin Schorb <https://github.com/dask/dask-image/commits?author=martinschorb>`_ - @martinschorb\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n\n\n5 reviewers added to this release (alphabetical)\n\n* `Erik Holmgren <https://github.com/dask/dask-image/commits?author=Holmgren825>`_ - @Holmgren825\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham 
<https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Juan Nunez-Iglesias <https://github.com/dask/dask-image/commits?author=jni>`_ - @jni\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n\n\n2023.08.1 (2023-08-04)\n----------------------\n\nWe're pleased to announce the release of dask-image 2023.08.1!\n\nThis is a patch release to complete the dropping of python 3.8\nin the previous release.\n\n* Use `>=3.9` in `python_requires` in `setup.py` (#336)\n\n2 authors added to this release (alphabetical)\n\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n\n\n0 reviewers added to this release (alphabetical)\n\n\n2023.08.0 (2023-08-03)\n----------------------\n\nWe're pleased to announce the release of dask-image 2023.08.0!\n\nHighlights\n\nThis version fixes bugs related to processing CuPy backed dask arrays\nand improves testing on GPU CI. It drops support for python 3.8 and\nadds pandas as a dependency. 
As a feature improvement, the dask-image\nequivalent of ``scipy.ndimage.label`` now supports arbitrary\nstructuring elements.\n\nFor full support of all GPU functionality in dask-image we recommend\nusing CuPy version 9.0.0 or higher.\n\nImprovements\n\n* Generalised ndmeasure.label to arbitrary structuring elements (#321)\n\nBug Fixes\n\n* Added missing cupy test mark and fixed cupy threshold (#329)\n* Moved functions from ndimage submodules to ndimage namespace (#325)\n\nUpdated requirements\n\n* Drop Python 3.8, in accordance with NEP29 recommendation (#315)\n* Require NumPy 1.18+ (#304)\n* Add pandas requirement for find_objs function (#309)\n\nBuild Tools\n\n* Continuous integration\n   * Update GPU conda environment before running tests (#318)\n   * Fix GitHub actions README badge (#323)\n* Dependabot updates\n   * Bump coverallsapp/github-action from 2.0.0 to 2.1.2 (#313)\n   * Bump coverallsapp/github-action from 2.1.2 to 2.2.0 (#322)\n   * Bump coverallsapp/github-action from 2.2.0 to 2.2.1 (#326)\n\n\n6 authors added to this release (alphabetical)\n\n* `Charles Blackmon-Luca <https://github.com/dask/dask-image/commits?author=charlesbluca>`_ - @charlesbluca\n* `David Stansby <https://github.com/dask/dask-image/commits?author=dstansby>`_ - @dstansby\n* `dependabot[bot] <https://github.com/dask/dask-image/commits?author=dependabot[bot]>`_ - @dependabot[bot]\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n\n\n4 reviewers added to this release (alphabetical)\n\n* `Charles Blackmon-Luca <https://github.com/dask/dask-image/commits?author=charlesbluca>`_ - @charlesbluca\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham 
<https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Juan Nunez-Iglesias <https://github.com/dask/dask-image/commits?author=jni>`_ - @jni\n\n\nv2023.03.0 (2023-03-27)\n-----------------------\n\nWe're pleased to announce the release of dask-image v2023.03.0!\n\nHighlights\n\nThis version of dask-image drops support for python 3.7,\nnow requires a minimum Dask version of 2021.10.0 or higher \n(due to a security patch), and makes tifffile a regular requirement.\nWe also now build and publish wheel files to PyPI.\n\nImprovements\n\n* Documentation\n   * Add GPU CI info to contributing docs (#300)\n   * Docs: add GPU support info to coverage table (#301)\n\n* Testing\n   * Test `gaussian` alias (#287)\n   * Update NaN block size tests for threshold_local function (#289)\n   * Test `find_objects` w/incorrect array type (#292)\n\nDeprecations and updated requirements\n\n* Update supported python versions to 3.8, 3.9, 3.10, & 3.11 (drop python 3.7) (#284)\n* Security update: Dask v2021.10.0 as minimum allowable version (#288)\n* Make tifffile regular requirement (#295)\n\nBuild Tools\n\n* Continuous integration\n   * Refresh doc environment (#273)\n   * Setup Coveralls with GitHub Actions (#274)\n   * Pin to jinja2<3.1 to avoid Readthedocs build error (#278)\n   * Updates `setup.py`'s Python versions (#285)\n   * Combine CI workflows for testing and release upload to PyPI (#291)\n   * Enable option to restart GHA (#293)\n   * Readd `environment-latest.yml` symlink (#294)\n   * Add python 3.10 to gpuCI matrix (#298)\n* Releases\n   * ENH: Build and publish wheels in GitHub CI (#272)\n   * Update release notes script (#299)\n   * Release notes for v2022.09.0 (#270)\n* Dependabot updates\n   * Create dependabot.yml (#279)\n   * Bump actions/setup-python from 2 to 4 (#280)\n   * Bump actions/checkout from 2 to 3 (#281)\n   * Bump coverallsapp/github-action from 1.1.3 to 1.2.2 (#282)\n   * Bump coverallsapp/github-action from 1.2.2 to 1.2.4 (#283)\n 
  * Bump coverallsapp/github-action from 1.2.4 to 2.0.0 (#296)\n\nOther Pull Requests\n\n* Group all imread functions together in the same file (#290)\n\n7 authors added to this release (alphabetical)\n\n* `Charles Blackmon-Luca <https://github.com/dask/dask-image/commits?author=charlesbluca>`_ - @charlesbluca\n* `dependabot[bot] <https://github.com/dask/dask-image/commits?author=dependabot[bot]>`_ - @dependabot[bot]\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n* `Matt McCormick <https://github.com/dask/dask-image/commits?author=thewtex>`_ - @thewtex\n* `Volker Hilsenstein <https://github.com/dask/dask-image/commits?author=VolkerH>`_ - @VolkerH\n\n\n3 reviewers added to this release (alphabetical)\n\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Matt McCormick <https://github.com/dask/dask-image/commits?author=thewtex>`_ - @thewtex\n\n\nv2022.09.0 (2022-09-19)\n-----------------------\n\nWe're pleased to announce the release of dask-image v2022.09.0!\n\nNot much has changed since the last release.\nVolker Hilsenstein has improved imread, which now uses natural sorting for strings.\nFred Blunt has fixed deprecation warnings from scipy.ndimage,\nand we've also done some miscellaneous maintenance work.\n\nImprovements\n\n* Use natural sorting in  `imread(...)` when globbing multiple files  (#265)\n* Avoid DeprecationWarnings when importing scipy.ndimage filter functions (#261)\n\n\nMaintenance\n\n* Remove/add testing for python 3.6/3.9, update CI pinnings (#257)\n* Update docs theme for rebranding (#263)\n* Run CI on `main` (#264)\n\n\n6 authors added to this 
release (alphabetical)\n\n* `Charles Blackmon-Luca <https://github.com/dask/dask-image/commits?author=charlesbluca>`_ - @charlesbluca\n* `Fred Bunt <https://github.com/dask/dask-image/commits?author=fbunt>`_ - @fbunt\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Sarah Charlotte Johnson <https://github.com/dask/dask-image/commits?author=scharlottej13>`_ - @scharlottej13\n* `Volker Hilsenstein <https://github.com/dask/dask-image/commits?author=VolkerH>`_ - @VolkerH\n\n\n3 reviewers added to this release (alphabetical)\n\n* `Charles Blackmon-Luca <https://github.com/dask/dask-image/commits?author=charlesbluca>`_ - @charlesbluca\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n\n\n2021.12.0\n----------\n\nWe're pleased to announce the release of dask-image 2021.12.0!\n\nHighlights\n\nThe major highlights of this release include the introduction of new features for ``find_objects`` and spline filters.\nWe have also moved to using CalVer (calendar version numbers) to match the main Dask project.\n\nNew Features\n\n* Find objects bounding boxes (#240)\n* Add spline_filter and spline_filter1d (#215)\n\n\nImprovements\n\n* ENH: add remaining kwargs to binary_closing and binary_opening (#221)\n* ndfourier: support n > 0 (for rfft) and improve performance (#222)\n* affine_transform: increased shape of required input array slices (#216)\n\n\nBug Fixes\n\n* BUG: add missing import of warnings in dask_image.ndmeasure (#224)\n* Fix wrap bug in ndfilters convolve and correlate (#243)\n* Upgrade for compatibility with latest dask release (#241)\n\n\nTest infrastructure\n\n* GitHub actions testing (#188)\n* Set up gpuCI testing on PRs (#248)\n* Remove `RAPIDS_VER` axis, 
bump `CUDA_VER` in gpuCI matrix (#249)\n\n\nDocumentation updates\n\n* Code style cleanup (#227)\n* Remove out of date email address, strip __author__ & __email__ (#225)\n* Update release guide, Dask CalVer uses YYYY.MM.DD (#236)\n* Update min python version in setup.py (#250)\n* Use new Dask docs theme (#245)\n* Docs: Add `find_objects` to the coverage table (#254)\n\n\nOther Pull Requests\n\n* Switch to CalVer (calendar versioning) (#233)\n\n\n6 authors added to this release (alphabetical)\n\n* `anlavandier <https://github.com/dask/dask-image/commits?author=anlavandier>`_ - @anlavandier\n* `Charles Blackmon-Luca <https://github.com/dask/dask-image/commits?author=charlesbluca>`_ - @charlesbluca\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `Gregory R. Lee <https://github.com/dask/dask-image/commits?author=grlee77>`_ - @grlee77\n* `Jacob Tomlinson <https://github.com/dask/dask-image/commits?author=jacobtomlinson>`_ - @jacobtomlinson\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n\n\n6 reviewers added to this release (alphabetical)\n\n* `anlavandier <https://github.com/dask/dask-image/commits?author=anlavandier>`_ - @anlavandier\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `Gregory R. 
Lee <https://github.com/dask/dask-image/commits?author=grlee77>`_ - @grlee77\n* `Jacob Tomlinson <https://github.com/dask/dask-image/commits?author=jacobtomlinson>`_ - @jacobtomlinson\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n\n\n0.6.0 (2021-05-06)\n------------------\n\nWe're pleased to announce the release of dask-image 0.6.0!\n\nHighlights\n\nThe highlights of this release include GPU support for binary morphological\nfunctions, and improvements to the performance of ``imread``.\n\nCupy version 9.0.0 or higher is required for GPU support of the ``ndmorph`` subpackage.\nCupy version 7.7.0 or higher is required for GPU support of the ``ndfilters`` and ``imread`` subpackages.\n\nNew Features\n\n* GPU support for ndmorph subpackage: binary morphological functions (#157)\n\nImprovements\n\n* Improve imread performance: reduced overhead of pim.open calls when reading from image sequence (#182)\n\nBug Fixes\n\n* dask-image imread v0.5.0 not working with dask distributed Client & napari (#194)\n* Not able to map actual image name with dask_image.imread (#200, fixed by #182)\n* affine_transform: Remove inconsistencies with ndimage implementation #205\n\nAPI Changes\n\n* Add alias ``gaussian`` pointing to ``gaussian_filter`` (#193)\n\nOther Pull Requests\n\n* Change default branch from master to main (#185)\n* Fix rst formatting in release_guide.rst (#186)\n\n4 authors added to this release (alphabetical)\n\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `Julia Signell <https://github.com/dask/dask-image/commits?author=jsignell>`_ - @jsignell\n* `KM Goh <https://github.com/dask/dask-image/commits?author=K-Monty>`_ - @K-Monty\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n\n2 reviewers added to this release 
(alphabetical)\n\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `KM Goh <https://github.com/dask/dask-image/commits?author=K-Monty>`_ - @K-Monty\n\n0.5.0 (2021-02-01)\n------------------\n\nWe're pleased to announce the release of dask-image 0.5.0!\n\nHighlights\n\nThe biggest highlight of this release is our new affine transformation feature, contributed by Marvin Albert.\nThe SciPy Japan sprint in November 2020 led to many improvements, and I'd like to recognise the hard work by Tetsuo Koyama and Kuya Takami.\nSpecial thanks go to everyone who joined us at the conference!\n\nNew Features\n\n* Affine transformation feature added: from dask_image.ndinterp import affine_transform (#159)\n* GPU support added for local_threshold with method='mean' (#158)\n* Pathlib input now accepted for imread functions (#174)\n\nImprovements\n\n* Performance improvement for 'imread', we now use `da.map_blocks` instead of `da.concatenate` (#165)\n\nBug Fixes\n\n* Fixed imread tests (add `contiguous=True` when saving test data with tifffile) (#164)\n* Fixed scipy LooseVersion for sum_labels check (#176)\n\nAPI Changes\n\n* 'sum' is renamed to 'sum_labels' and a deprecation warning added (#172)\n\nDocumentation improvements\n\n* Add section Talks and Slides #163 (#169)\n* Add link to SciPy Japan 2020 talk (#171)\n* Add development guide to setup environment and run tests (#170)\n* Update information in AUTHORS.rst (#167)\n\nMaintenance\n\n* Update dependencies in Read The Docs environment (#168)\n\n6 authors added to this release (alphabetical)\n\n* `Fabian Chong <https://github.com/dask/dask-image/commits?author=feiming>`_ - @feiming\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Kuya Takami <https://github.com/dask/dask-image/commits?author=ku-ya>`_ - 
@ku-ya\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n* `Tetsuo Koyama <https://github.com/dask/dask-image/commits?author=tkoyama010>`_ - @tkoyama010\n\n\n7 reviewers added to this release (alphabetical)\n\n* `Fabian Chong <https://github.com/dask/dask-image/commits?author=feiming>`_ - @feiming\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `Gregory R. Lee <https://github.com/dask/dask-image/commits?author=grlee77>`_ - @grlee77\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Juan Nunez-Iglesias <https://github.com/dask/dask-image/commits?author=jni>`_ - @jni\n* `Marvin Albert <https://github.com/dask/dask-image/commits?author=m-albert>`_ - @m-albert\n* `Tetsuo Koyama <https://github.com/dask/dask-image/commits?author=tkoyama010>`_ - @tkoyama010\n\n0.4.0 (2020-09-02)\n------------------\n\nWe're pleased to announce the release of dask-image 0.4.0!\n\nHighlights\n\nThe major highlight of this release is support for cupy GPU arrays for dask-image subpackages imread and ndfilters.\nCupy version 7.7.0 or higher is required to use this functionality.\nGPU support for the remaining dask-image subpackages (ndmorph, ndfourier, and ndmeasure) will be rolled out at a later date, beginning with ndmorph.\n\nWe also have a new function, threshold_local, similar to the scikit-image local threshold function.\n\nLastly, we've made more improvements to the user documentation, which includes work by new contributor @abhisht51.\n\nNew Features\n\n* GPU support for ndfilters & imread modules (#151)\n* threshold_local function for dask-image ndfilters (#112)\n\nImprovements\n\n* Add function coverage table to the dask-image docs (#155)\n* Developer documentation: release guide (#142)\n* Use tifffile for testing instead of scikit-image (#145)\n\n\n3 authors added to this release (alphabetical)\n\n* `Abhisht Singh 
<https://github.com/dask/dask-image/commits?author=abhisht51>`_ - @abhisht51\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n\n\n2 reviewers added to this release (alphabetical)\n\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `Juan Nunez-Iglesias <https://github.com/dask/dask-image/commits?author=jni>`_ - @jni\n\n0.3.0 (2020-06-06)\n------------------\n\nWe're pleased to announce the release of dask-image 0.3.0!\n\nHighlights\n\n* Python 3.8 is now supported (#131)\n* Support for Python 2.7 and 3.5 has been dropped (#119) (#131)\n* We have a dask-image quickstart guide (#108), available from the dask examples page: https://examples.dask.org/applications/image-processing.html\n\nNew Features\n\n* Distributed labeling has been implemented (#94)\n* Area measurement function added to dask_image.ndmeasure (#115)\n\nImprovements\n\n* Optimize out first `where` in `label` (#102)\n\nBug Fixes\n\n* Bugfix in `center_of_mass` to correctly handle integer input arrays (#122)\n* Test float cast in `_norm_args` (#105)\n* Handle Dask's renaming of `atop` to `blockwise` (#98)\n\nAPI Changes\n\n* Rename the input argument to image in the ndimage functions (#117)\n* Rename labels in ndmeasure function arguments (#126)\n\nSupport\n\n* Update installation instructions so conda is the preferred method (#88)\n* Add Python 3.7 to Travis CI (#89)\n* Add instructions for building docs with sphinx to CONTRIBUTING.rst (#90)\n* Sort Python 3.7 requirements (#91)\n* Use double equals for exact package versions (#92)\n* Use flake8 (#93)\n* Note Python 3.7 support (#95)\n* Fix the Travis MacOS builds (update XCode to version 9.4 and use matplotlib 'Agg' backend) (#113)\n\n7 authors added to this release (alphabetical)\n\n* `Amir Khalighi 
<https://github.com/dask/dask-image/commits?author=akhalighi>`_ - @akhalighi\n* `Elliana May <https://github.com/dask/dask-image/commits?author=Mause>`_ - @Mause\n* `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n* `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n* `Jaromir Latal <https://github.com/dask/dask-image/commits?author=jermenkoo>`_ - @jermenkoo\n* `Juan Nunez-Iglesias <https://github.com/dask/dask-image/commits?author=jni>`_ - @jni\n* `timbo8 <https://github.com/dask/dask-image/commits?author=timbo8>`_ - @timbo8\n\n2 reviewers added to this release (alphabetical)\n\n- `Genevieve Buckley <https://github.com/dask/dask-image/commits?author=GenevieveBuckley>`_ - @GenevieveBuckley\n- `jakirkham <https://github.com/dask/dask-image/commits?author=jakirkham>`_ - @jakirkham\n\n0.2.0 (2018-10-10)\n------------------\n\n* Construct separate label masks in `labeled_comprehension` (#82)\n* Use `full` to construct 1-D NumPy array (#83)\n* Use NumPy's `ndindex` in `labeled_comprehension` (#81)\n* Cleanup `test_labeled_comprehension_struct` (#80)\n* Use 1-D structured array fields for position-based kernels in `ndmeasure` (#79)\n* Rewrite `center_of_mass` using `labeled_comprehension` (#78)\n* Adjust `extrema`'s internal structured type handling (#77)\n* Test labeled_comprehension with object type (#76)\n* Rewrite `histogram` to use `labeled_comprehension` (#75)\n* Use labeled_comprehension directly in more function in ndmeasure (#74)\n* Update mean's variables to match other functions (#73)\n* Consolidate summation in `_ravel_shape_indices` (#72)\n* Update HISTORY for 0.1.2 release (#71)\n* Bump dask-sphinx-theme to 1.1.0 (#70)\n\n0.1.2 (2018-09-17)\n------------------\n\n* Ensure `labeled_comprehension`'s `default` is 1D. (#69)\n* Bump dask-sphinx-theme to 1.0.5. (#68)\n* Use nout=2 in ndmeasure's label. (#67)\n* Use custom kernel for extrema. 
(#61)\n* Handle structured dtype in labeled_comprehension. (#66)\n* Fixes for `_unravel_index`. (#65)\n* Bump dask-sphinx-theme to 1.0.4. (#64)\n* Unwrap some lines. (#63)\n* Use dask-sphinx-theme. (#62)\n* Refactor out `_unravel_index` function. (#60)\n* Divide `sigma` by `-2`. (#59)\n* Use Python 3's definition of division in Python 2. (#58)\n* Force dtype of `prod` in `_ravel_shape_indices`. (#57)\n* Drop vendored compatibility code. (#54)\n* Drop vendored copy of indices and uses thereof. (#56)\n* Drop duplicate utility tests from `ndmorph`. (#55)\n* Refactor utility module for imread. (#53)\n* Reuse `ndfilter` utility function in `ndmorph`. (#52)\n* Cleanup freq_grid_i construction in _get_freq_grid. (#51)\n* Use shared Python 2/3 compatibility module. (#50)\n* Consolidate Python 2/3 compatibility code. (#49)\n* Refactor Python 2/3 compatibility from imread. (#48)\n* Perform `2 * pi` first in `_get_ang_freq_grid`. (#47)\n* Ensure `J` is negated first in `fourier_shift`. (#46)\n* Breakout common changes in fourier_gaussian. (#45)\n* Use conda-forge badge. (#44)\n\n0.1.1 (2018-08-31)\n------------------\n\n* Fix a bug in an ndmeasure test of an internal function.\n\n0.1.0 (2018-08-31)\n------------------\n\n* First release on PyPI.\n* Pulls in content from dask-image org.\n* Supports reading of image files into Dask.\n* Provides basic N-D filters with options to extend.\n* Provides a few N-D Fourier filters.\n* Provides a few N-D morphological filters.\n* Provides a few N-D measurement functions for label images.\n* Has 100% line coverage in test suite.\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "Copyright (c) 2017-2018, dask-image Developers (see AUTHORS.rst for details)\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\nmay be used to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "include AUTHORS.rst\ninclude CONTRIBUTING.rst\ninclude HISTORY.rst\ninclude LICENSE.txt\ninclude README.rst\n\nrecursive-include tests *\nrecursive-exclude * __pycache__\nrecursive-exclude * *.py[co]\n\nrecursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif\n\ninclude dask_image/_version.py\n"
  },
  {
    "path": "Makefile",
    "content": ".PHONY: clean clean-test clean-pyc clean-build docs help\n.DEFAULT_GOAL := help\ndefine BROWSER_PYSCRIPT\nimport os, webbrowser, sys\ntry:\n\tfrom urllib import pathname2url\nexcept:\n\tfrom urllib.request import pathname2url\n\nwebbrowser.open(\"file://\" + pathname2url(os.path.abspath(sys.argv[1])))\nendef\nexport BROWSER_PYSCRIPT\n\ndefine PRINT_HELP_PYSCRIPT\nimport re, sys\n\nfor line in sys.stdin:\n\tmatch = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)\n\tif match:\n\t\ttarget, help = match.groups()\n\t\tprint(\"%-20s %s\" % (target, help))\nendef\nexport PRINT_HELP_PYSCRIPT\nBROWSER := python -c \"$$BROWSER_PYSCRIPT\"\n\nhelp:\n\t@python -c \"$$PRINT_HELP_PYSCRIPT\" < $(MAKEFILE_LIST)\n\nclean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts\n\n\nclean-build: ## remove build artifacts\n\trm -fr build/\n\trm -fr dist/\n\trm -fr .eggs/\n\tfind . -name '*.egg-info' -exec rm -fr {} +\n\tfind . -name '*.egg' -exec rm -f {} +\n\nclean-pyc: ## remove Python file artifacts\n\tfind . -name '*.pyc' -exec rm -f {} +\n\tfind . -name '*.pyo' -exec rm -f {} +\n\tfind . -name '*~' -exec rm -f {} +\n\tfind . 
-name '__pycache__' -exec rm -fr {} +\n\nclean-test: ## remove test and coverage artifacts\n\trm -fr .tox/\n\trm -f .coverage\n\trm -fr htmlcov/\n\nlint: ## check style with flake8\n\tflake8 dask_image tests\n\ntest: ## run tests quickly with the default Python\n\tpython -m pip install \".[test]\"\n\tpytest\n\ntest-all: ## run tests on every Python version with tox\n\ttox\n\ncoverage: ## check code coverage quickly with the default Python\n\tcoverage run -m pytest\n\tcoverage report -m\n\tcoverage html\n\t$(BROWSER) htmlcov/index.html\n\ndocs: ## generate Sphinx HTML documentation, including API docs\n\trm -f docs/dask_image.rst\n\trm -f docs/modules.rst\n\tsphinx-apidoc -o docs/ dask_image\n\t$(MAKE) -C docs clean\n\t$(MAKE) -C docs html\n\t$(BROWSER) docs/_build/html/index.html\n\nrelease: clean ## package and upload a release\n\tpython -m build\n\tls -l dist\n\ttwine upload dist/*\n\ndist: clean ## builds source and wheel package\n\tpython -m build\n\tls -l dist\n\ninstall: clean ## install the package to the active Python's site-packages\n\tpython -m pip install .\n"
  },
  {
    "path": "README.rst",
    "content": "==========\ndask-image\n==========\n\n\n.. image:: https://img.shields.io/pypi/v/dask-image.svg\n        :target: https://pypi.python.org/pypi/dask-image\n        :alt: PyPI\n\n.. image:: https://img.shields.io/conda/vn/conda-forge/dask-image.svg\n        :target: https://anaconda.org/conda-forge/dask-image\n        :alt: conda-forge\n\n.. image:: https://github.com/dask/dask-image/actions/workflows/test_and_deploy.yml/badge.svg\n        :target: https://github.com/dask/dask-image/actions/workflows/test_and_deploy.yml\n        :alt: GitHub Actions CI\n\n.. image:: https://readthedocs.org/projects/dask-image/badge/?version=latest\n        :target: https://dask-image.readthedocs.io/en/latest/?badge=latest\n        :alt: Read the Docs\n\n.. image:: https://coveralls.io/repos/github/dask/dask-image/badge.svg\n        :target: https://coveralls.io/github/dask/dask-image\n        :alt: Coveralls\n\n.. image:: https://img.shields.io/github/license/dask/dask-image.svg\n        :target: ./LICENSE.txt\n        :alt: License\n\n\nDistributed image processing\n\n\n* Free software: BSD 3-Clause\n* Documentation: https://dask-image.readthedocs.io.\n"
  },
  {
    "path": "continuous_integration/environment-3.10.yml",
    "content": "name: dask-image-testenv\n\nchannels:\n  - conda-forge\n\ndependencies:\n  - python=3.10.*\n  - pip==23.0.1\n  - coverage==7.2.1\n  - flake8==6.0.0\n  - pytest==7.2.2\n  - pytest-cov==4.0.0\n  - pytest-flake8==1.3.0\n  - pytest-timeout >=2.3.1\n  - dask==2024.4.1\n  - numpy==1.24.2\n  - scipy==1.10.1\n  - scikit-image==0.19.3\n  - pims==0.6.1\n  - slicerator==1.1.0\n  - pandas==2.0.0\n  - twine==5.0.0\n  - pip:\n    - build==1.2.1"
  },
  {
    "path": "continuous_integration/environment-3.11.yml",
    "content": "name: dask-image-testenv\n\nchannels:\n  - conda-forge\n\ndependencies:\n  - python=3.11.*\n  - pip==23.0.1\n  - coverage==7.2.1\n  - flake8==6.0.0\n  - pytest==7.2.2\n  - pytest-cov==4.0.0\n  - pytest-flake8==1.3.0\n  - pytest-timeout >=2.3.1\n  - dask==2024.4.1\n  - numpy==1.24.2\n  - scipy==1.10.1\n  - scikit-image==0.19.3\n  - pims==0.6.1\n  - slicerator==1.1.0\n  - pandas==2.0.0\n  - twine==5.0.0\n  - pip:\n    - build==1.2.1"
  },
  {
    "path": "continuous_integration/environment-3.12.yml",
    "content": "name: dask-image-testenv\n\nchannels:\n  - conda-forge\n\ndependencies:\n  - python=3.12.*\n  - pip==24.0\n  - coverage==7.5.1\n  - flake8==7.0.0\n  - pytest==8.2.0\n  - pytest-cov==5.0.0\n  - pytest-flake8==1.3.0\n  - pytest-timeout >=2.3.1\n  - dask==2024.4.1\n  - numpy==1.26.4\n  - scipy==1.13.0\n  - scikit-image==0.22.0\n  - pims==0.6.1\n  - slicerator==1.1.0\n  - pandas==2.2.2\n  - twine==5.0.0\n  - pip:\n    - build==1.2.1"
  },
  {
    "path": "continuous_integration/environment-3.9.yml",
    "content": "name: dask-image-testenv\n\nchannels:\n  - conda-forge\n\ndependencies:\n  - python=3.9.*\n  - pip==24.0\n  - coverage==7.5.1\n  - flake8==7.0.0\n  - pytest==8.2.0\n  - pytest-cov==5.0.0\n  - pytest-flake8==1.3.0\n  - pytest-timeout >=2.3.1\n  - dask==2024.4.1\n  - numpy==1.26.4\n  - scipy==1.13.0\n  - scikit-image==0.22.0\n  - pims==0.6.1\n  - slicerator==1.1.0\n  - pandas==2.2.2\n  - twine==5.0.0\n  - pip:\n    - build==1.2.1\n"
  },
  {
    "path": "continuous_integration/environment-doc.yml",
    "content": "name: dask_image_doc_env\n\nchannels:\n  - conda-forge\n\ndependencies:\n  - python=3.9.*\n  - pip==22.3\n  - jinja2<3.1\n  - dask==2024.4.1\n  - numpy==1.23.4\n  - scipy==1.9.2\n  - scikit-image==0.19.3\n  - pims==0.6.1\n  - slicerator==1.1.0\n  - pandas==2.0.0\n  - pip:\n    - build==1.2.1\n    - ..  # install dask_image from this repository source\n    # FIXME: This workaround is required until we have sphinx>=5, as enabled by\n    #        dask-sphinx-theme no longer pinning sphinx-book-theme==0.2.0. This is\n    #        tracked in https://github.com/dask/dask-sphinx-theme/issues/68.\n    #        Once sphinx>=5 is available, we can remove this workaround.\n    - dask-sphinx-theme>=3.0.0\n    - sphinx>=4.0.0\n    - sphinxcontrib-applehelp>=1.0.0,<1.0.7\n    - sphinxcontrib-devhelp>=1.0.0,<1.0.6\n    - sphinxcontrib-htmlhelp>=2.0.0,<2.0.5\n    - sphinxcontrib-serializinghtml>=1.1.0,<1.1.10\n    - sphinxcontrib-qthelp>=1.0.0,<1.0.7"
  },
  {
    "path": "dask_image/__init__.py",
    "content": ""
  },
  {
    "path": "dask_image/dispatch/__init__.py",
    "content": ""
  },
  {
    "path": "dask_image/dispatch/_dispatch_ndfilters.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy.ndimage\n\nfrom ._dispatcher import Dispatcher\n\n__all__ = [\n    \"dispatch_convolve\",\n    \"dispatch_correlate\",\n    \"dispatch_laplace\",\n    \"dispatch_prewitt\",\n    \"dispatch_sobel\",\n    \"dispatch_gaussian_filter\",\n    \"dispatch_gaussian_gradient_magnitude\",\n    \"dispatch_gaussian_laplace\",\n    \"dispatch_generic_filter\",\n    \"dispatch_minimum_filter\",\n    \"dispatch_median_filter\",\n    \"dispatch_maximum_filter\",\n    \"dispatch_rank_filter\",\n    \"dispatch_percentile_filter\",\n    \"dispatch_uniform_filter\",\n    \"dispatch_threshold_local_mean\",\n]\n\n\ndispatch_convolve = Dispatcher(name=\"dispatch_convolve\")\ndispatch_correlate = Dispatcher(name=\"dispatch_correlate\")\ndispatch_laplace = Dispatcher(name=\"dispatch_laplace\")\ndispatch_prewitt = Dispatcher(name=\"dispatch_prewitt\")\ndispatch_sobel = Dispatcher(name=\"dispatch_sobel\")\ndispatch_gaussian_filter = Dispatcher(name=\"dispatch_gaussian_filter\")\ndispatch_gaussian_gradient_magnitude = Dispatcher(name=\"dispatch_gaussian_gradient_magnitude\")  # noqa: E501\ndispatch_gaussian_laplace = Dispatcher(name=\"dispatch_gaussian_laplace\")\ndispatch_generic_filter = Dispatcher(name=\"dispatch_generic_filter\")\ndispatch_minimum_filter = Dispatcher(name=\"dispatch_minimum_filter\")\ndispatch_median_filter = Dispatcher(name=\"dispatch_median_filter\")\ndispatch_maximum_filter = Dispatcher(name=\"dispatch_maximum_filter\")\ndispatch_rank_filter = Dispatcher(name=\"dispatch_rank_filter\")\ndispatch_percentile_filter = Dispatcher(name=\"dispatch_percentile_filter\")\ndispatch_uniform_filter = Dispatcher(name=\"dispatch_uniform_filter\")\ndispatch_threshold_local_mean = Dispatcher(name=\"dispatch_threshold_local_mean\")  # noqa: E501\n\n\n# ================== convolve ==================\n@dispatch_convolve.register(np.ndarray)\ndef numpy_convolve(*args, **kwargs):\n    return 
scipy.ndimage.convolve\n\n\n@dispatch_convolve.register_lazy(\"cupy\")\ndef register_cupy_convolve():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_convolve.register(cupy.ndarray)\n    def cupy_convolve(*args, **kwargs):\n        return cupyx.scipy.ndimage.convolve\n\n\n# ================== correlate ==================\n@dispatch_correlate.register(np.ndarray)\ndef numpy_correlate(*args, **kwargs):\n    return scipy.ndimage.correlate\n\n\n@dispatch_correlate.register_lazy(\"cupy\")\ndef register_cupy_correlate():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_correlate.register(cupy.ndarray)\n    def cupy_correlate(*args, **kwargs):\n        return cupyx.scipy.ndimage.correlate\n\n\n# ================== laplace ==================\n@dispatch_laplace.register(np.ndarray)\ndef numpy_laplace(*args, **kwargs):\n    return scipy.ndimage.laplace\n\n\n@dispatch_laplace.register_lazy(\"cupy\")\ndef register_cupy_laplace():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_laplace.register(cupy.ndarray)\n    def cupy_laplace(*args, **kwargs):\n        return cupyx.scipy.ndimage.laplace\n\n\n# ================== prewitt ==================\n@dispatch_prewitt.register(np.ndarray)\ndef numpy_prewitt(*args, **kwargs):\n    return scipy.ndimage.prewitt\n\n\n@dispatch_prewitt.register_lazy(\"cupy\")\ndef register_cupy_prewitt():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_prewitt.register(cupy.ndarray)\n    def cupy_prewitt(*args, **kwargs):\n        return cupyx.scipy.ndimage.prewitt\n\n\n# ================== sobel ==================\n@dispatch_sobel.register(np.ndarray)\ndef numpy_sobel(*args, **kwargs):\n    return scipy.ndimage.sobel\n\n\n@dispatch_sobel.register_lazy(\"cupy\")\ndef register_cupy_sobel():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_sobel.register(cupy.ndarray)\n    def cupy_sobel(*args, **kwargs):\n        return cupyx.scipy.ndimage.sobel\n\n\n# ================== 
gaussian_filter ==================\n@dispatch_gaussian_filter.register(np.ndarray)\ndef numpy_gaussian_filter(*args, **kwargs):\n    return scipy.ndimage.gaussian_filter\n\n\n@dispatch_gaussian_filter.register_lazy(\"cupy\")\ndef register_cupy_gaussian_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_gaussian_filter.register(cupy.ndarray)\n    def cupy_gaussian_filter(*args, **kwargs):\n        return cupyx.scipy.ndimage.gaussian_filter\n\n\n# ================== gaussian_gradient_magnitude ==================\n@dispatch_gaussian_gradient_magnitude.register(np.ndarray)\ndef numpy_gaussian_gradient_magnitude(*args, **kwargs):\n    return scipy.ndimage.gaussian_gradient_magnitude\n\n\n@dispatch_gaussian_gradient_magnitude.register_lazy(\"cupy\")\ndef register_cupy_gaussian_gradient_magnitude():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_gaussian_gradient_magnitude.register(cupy.ndarray)\n    def cupy_gaussian_gradient_magnitude(*args, **kwargs):\n        return cupyx.scipy.ndimage.gaussian_gradient_magnitude\n\n\n# ================== gaussian_laplace ==================\n@dispatch_gaussian_laplace.register(np.ndarray)\ndef numpy_gaussian_laplace(*args, **kwargs):\n    return scipy.ndimage.gaussian_laplace\n\n\n@dispatch_gaussian_laplace.register_lazy(\"cupy\")\ndef register_cupy_gaussian_laplace():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_gaussian_laplace.register(cupy.ndarray)\n    def cupy_gaussian_laplace(*args, **kwargs):\n        return cupyx.scipy.ndimage.gaussian_laplace\n\n\n# ================== generic_filter ==================\n@dispatch_generic_filter.register(np.ndarray)\ndef numpy_generic_filter(*args, **kwargs):\n    return scipy.ndimage.generic_filter\n\n\n@dispatch_generic_filter.register_lazy(\"cupy\")\ndef register_cupy_generic_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_generic_filter.register(cupy.ndarray)\n    def cupy_generic_filter(*args, 
**kwargs):\n        return cupyx.scipy.ndimage.generic_filter\n\n\n# ================== minimum_filter ==================\n@dispatch_minimum_filter.register(np.ndarray)\ndef numpy_minimum_filter(*args, **kwargs):\n    return scipy.ndimage.minimum_filter\n\n\n@dispatch_minimum_filter.register_lazy(\"cupy\")\ndef register_cupy_minimum_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_minimum_filter.register(cupy.ndarray)\n    def cupy_minimum_filter(*args, **kwargs):\n        return cupyx.scipy.ndimage.minimum_filter\n\n\n# ================== median_filter ==================\n@dispatch_median_filter.register(np.ndarray)\ndef numpy_median_filter(*args, **kwargs):\n    return scipy.ndimage.median_filter\n\n\n@dispatch_median_filter.register_lazy(\"cupy\")\ndef register_cupy_median_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_median_filter.register(cupy.ndarray)\n    def cupy_median_filter(*args, **kwargs):\n        return cupyx.scipy.ndimage.median_filter\n\n\n# ================== maximum_filter ==================\n@dispatch_maximum_filter.register(np.ndarray)\ndef numpy_maximum_filter(*args, **kwargs):\n    return scipy.ndimage.maximum_filter\n\n\n@dispatch_maximum_filter.register_lazy(\"cupy\")\ndef register_cupy_maximum_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_maximum_filter.register(cupy.ndarray)\n    def cupy_maximum_filter(*args, **kwargs):\n        return cupyx.scipy.ndimage.maximum_filter\n\n\n# ================== rank_filter ==================\n@dispatch_rank_filter.register(np.ndarray)\ndef numpy_rank_filter(*args, **kwargs):\n    return scipy.ndimage.rank_filter\n\n\n@dispatch_rank_filter.register_lazy(\"cupy\")\ndef register_cupy_rank_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_rank_filter.register(cupy.ndarray)\n    def cupy_rank_filter(*args, **kwargs):\n        return cupyx.scipy.ndimage.rank_filter\n\n\n# ================== 
percentile_filter ==================\n@dispatch_percentile_filter.register(np.ndarray)\ndef numpy_percentile_filter(*args, **kwargs):\n    return scipy.ndimage.percentile_filter\n\n\n@dispatch_percentile_filter.register_lazy(\"cupy\")\ndef register_cupy_percentile_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_percentile_filter.register(cupy.ndarray)\n    def cupy_percentile_filter(*args, **kwargs):\n        return cupyx.scipy.ndimage.percentile_filter\n\n\n# ================== uniform_filter ==================\n@dispatch_uniform_filter.register(np.ndarray)\ndef numpy_uniform_filter(*args, **kwargs):\n    return scipy.ndimage.uniform_filter\n\n\n@dispatch_uniform_filter.register_lazy(\"cupy\")\ndef register_cupy_uniform_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_uniform_filter.register(cupy.ndarray)\n    def cupy_uniform_filter(*args, **kwargs):\n        return cupyx.scipy.ndimage.uniform_filter\n\n\n# ================== threshold_local_mean ==================\n@dispatch_threshold_local_mean.register(np.ndarray)\ndef numpy_threshold_local_mean(*args, **kwargs):\n    return np.mean\n\n\n@dispatch_threshold_local_mean.register_lazy(\"cupy\")\ndef register_cupy_threshold_local_mean():\n    import cupy\n\n    @dispatch_threshold_local_mean.register(cupy.ndarray)\n    def cupy_threshold_local_mean(*args, **kwargs):\n        # Code snippet taken from https://github.com/cupy/cupy/issues/3909\n        my_mean = cupy.ReductionKernel(\n            'T x',  # input params\n            'T y',  # output params\n            'x',  # map\n            'a + b',  # reduce\n            'y = a / _in_ind.size()',  # An undocumented variable and a hack\n            '0',  # identity value\n            'mean'  # kernel name\n        )\n        return my_mean\n"
  },
  {
    "path": "dask_image/dispatch/_dispatch_ndinterp.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom scipy import ndimage\n\nfrom ._dispatcher import Dispatcher\n\n__all__ = [\n    \"dispatch_affine_transform\",\n    \"dispatch_asarray\",\n]\n\ndispatch_affine_transform = Dispatcher(name=\"dispatch_affine_transform\")\n\n\n# ================== affine_transform ==================\n@dispatch_affine_transform.register(np.ndarray)\ndef numpy_affine_transform(*args, **kwargs):\n    return ndimage.affine_transform\n\n\n@dispatch_affine_transform.register_lazy(\"cupy\")\ndef register_cupy_affine_transform():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_affine_transform.register(cupy.ndarray)\n    def cupy_affine_transform(*args, **kwargs):\n\n        return cupyx.scipy.ndimage.affine_transform\n\n\ndispatch_spline_filter = Dispatcher(name=\"dispatch_spline_filter\")\n\n\n# ================== spline_filter ==================\n@dispatch_spline_filter.register(np.ndarray)\ndef numpy_spline_filter(*args, **kwargs):\n    return ndimage.spline_filter\n\n\n@dispatch_spline_filter.register_lazy(\"cupy\")\ndef register_cupy_spline_filter():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_spline_filter.register(cupy.ndarray)\n    def cupy_spline_filter(*args, **kwargs):\n\n        return cupyx.scipy.ndimage.spline_filter\n\n\ndispatch_spline_filter1d = Dispatcher(name=\"dispatch_spline_filter1d\")\n\n\n# ================== spline_filter1d ==================\n@dispatch_spline_filter1d.register(np.ndarray)\ndef numpy_spline_filter1d(*args, **kwargs):\n    return ndimage.spline_filter1d\n\n\n@dispatch_spline_filter1d.register_lazy(\"cupy\")\ndef register_cupy_spline_filter1d():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_spline_filter1d.register(cupy.ndarray)\n    def cupy_spline_filter1d(*args, **kwargs):\n\n        return cupyx.scipy.ndimage.spline_filter1d\n\n\ndispatch_asarray = Dispatcher(name=\"dispatch_asarray\")\n\n\n# ===================== asarray 
========================\n@dispatch_asarray.register(np.ndarray)\ndef numpy_asarray(*args, **kwargs):\n    return np.asarray\n\n\n@dispatch_asarray.register_lazy(\"cupy\")\ndef register_cupy_asarray():\n    import cupy\n\n    @dispatch_asarray.register(cupy.ndarray)\n    def cupy_asarray(*args, **kwargs):\n\n        return cupy.asarray\n"
  },
  {
    "path": "dask_image/dispatch/_dispatch_ndmorph.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy.ndimage\n\nfrom ._dispatcher import Dispatcher\n\n__all__ = [\n    \"dispatch_binary_dilation\",\n    \"dispatch_binary_erosion\",\n    \"dispatch_binary_structure\",\n]\n\ndispatch_binary_dilation = Dispatcher(name=\"dispatch_binary_dilation\")\ndispatch_binary_erosion = Dispatcher(name=\"dispatch_binary_erosion\")\ndispatch_binary_structure = Dispatcher(name='dispatch_binary_structure')\n\n\n# ================== binary_dilation ==================\n@dispatch_binary_dilation.register(np.ndarray)\ndef numpy_binary_dilation(*args, **kwargs):\n    return scipy.ndimage.binary_dilation\n\n\n@dispatch_binary_dilation.register_lazy(\"cupy\")\ndef register_cupy_binary_dilation():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_binary_dilation.register(cupy.ndarray)\n    def cupy_binary_dilation(*args, **kwargs):\n        return cupyx.scipy.ndimage.binary_dilation\n\n\n# ================== binary_erosion ==================\n@dispatch_binary_erosion.register(np.ndarray)\ndef numpy_binary_erosion(*args, **kwargs):\n    return scipy.ndimage.binary_erosion\n\n\n@dispatch_binary_erosion.register_lazy(\"cupy\")\ndef register_cupy_binary_erosion():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_binary_erosion.register(cupy.ndarray)\n    def cupy_binary_erosion(*args, **kwargs):\n        return cupyx.scipy.ndimage.binary_erosion\n\n\n# ================== generate_binary_structure ==================\n@dispatch_binary_structure.register(np.ndarray)\ndef numpy_binary_structure(*args, **kwargs):\n    return scipy.ndimage.generate_binary_structure\n\n\n@dispatch_binary_structure.register_lazy(\"cupy\")\ndef register_cupy_binary_structure():\n    import cupy\n    import cupyx.scipy.ndimage\n\n    @dispatch_binary_structure.register(cupy.ndarray)\n    def cupy_binary_structure(*args, **kwargs):\n        return cupyx.scipy.ndimage.generate_binary_structure\n"
  },
  {
    "path": "dask_image/dispatch/_dispatcher.py",
    "content": "# -*- coding: utf-8 -*-\n\nfrom dask.utils import Dispatch\n\n\ndef get_type(array):\n    \"\"\"Return type of arrays contained within the dask array chunks.\"\"\"\n    try:\n        datatype = type(array._meta)  # Check chunk type backing dask array\n    except AttributeError:\n        datatype = type(array)  # For all non-dask arrays\n    return datatype\n\n\nclass Dispatcher(Dispatch):\n    \"\"\"Simple single dispatch for different dask array types.\"\"\"\n\n    def __call__(self, arg, *args, **kwargs):\n        \"\"\"\n        Call the corresponding method based on type of dask array.\n        \"\"\"\n        datatype = get_type(arg)\n        meth = self.dispatch(datatype)\n        return meth(arg, *args, **kwargs)\n"
  },
  {
    "path": "dask_image/dispatch/_utils.py",
    "content": "# -*- coding: utf-8 -*-\n\nfrom ._dispatcher import get_type\n\n__all__ = [\n    \"check_arraytypes_compatible\",\n]\n\n\ndef check_arraytypes_compatible(*args):\n    \"\"\"Check array types are compatible.\n\n    For arrays to be compatible they must either have the same type,\n    or a dask array where the chunks match the same array type.\n\n    Examples of compatible arrays:\n    * Two (or more) numpy arrays\n    * A dask array with numpy chunks, and a numpy array\n\n    Examples of incompatible arrays:\n    * A numpy array and a cupy array\n    \"\"\"\n    arraytypes = [get_type(arg) for arg in args]\n    if len(set(arraytypes)) != 1:\n        raise ValueError(\"Array types must be compatible.\")\n"
  },
  {
    "path": "dask_image/imread/__init__.py",
    "content": "# -*- coding: utf-8 -*-\nimport glob\nimport numbers\nimport warnings\n\nimport dask.array as da\nimport numpy as np\nimport pims\nfrom tifffile import natural_sorted\n\n\ndef imread(fname, nframes=1, *, arraytype=\"numpy\"):\n    \"\"\"\n    Read image data into a Dask Array.\n\n    Provides a simple, fast mechanism to ingest image data into a\n    Dask Array. This uses the `pims` package to open images.\n\n    Parameters\n    ----------\n    fname : str or pathlib.Path\n        A glob like string that may match one or multiple filenames.\n        Where multiple filenames match, they are sorted using\n        natural (as opposed to alphabetical) sort.\n    nframes : int, optional\n        Number of the frames to include in each chunk (default: 1).\n    arraytype : str, optional\n        Array type for dask chunks. Available options: \"numpy\", \"cupy\".\n\n    Returns\n    -------\n    array : dask.array.Array\n        A Dask Array representing the contents of all image files.\n\n    Warnings\n    --------\n    There are several known issues with this function, and users are\n    recommended to use `dask.array.image.imread` or `bioio` instead.\n    \"\"\"\n\n    sfname = str(fname)\n    if not isinstance(nframes, numbers.Integral):\n        raise ValueError(\"`nframes` must be an integer.\")\n    if (nframes != -1) and not (nframes > 0):\n        raise ValueError(\"`nframes` must be greater than zero.\")\n\n    if arraytype == \"numpy\":\n        arrayfunc = np.asanyarray\n    elif arraytype == \"cupy\":   # pragma: no cover\n        import cupy\n        arrayfunc = cupy.asanyarray\n\n    with pims.open(sfname) as imgs:\n        shape = (len(imgs),) + imgs.frame_shape\n        dtype = np.dtype(imgs.pixel_type)\n\n    if nframes == -1:\n        nframes = shape[0]\n\n    if nframes > shape[0]:\n        warnings.warn(\n            \"`nframes` larger than number of frames in file.\"\n            \" Will truncate to number of frames in file.\",\n            RuntimeWarning\n        )\n    elif shape[0] % nframes != 0:\n        warnings.warn(\n            \"`nframes` does not nicely divide number of frames in file.\"\n            \" Last chunk will contain the remainder.\",\n            RuntimeWarning\n        )\n\n    # place source filenames into dask array after sorting\n    filenames = natural_sorted(glob.glob(sfname))\n    if len(filenames) > 1:\n        ar = da.from_array(filenames, chunks=(nframes,))\n        multiple_files = True\n    else:\n        ar = da.from_array(filenames * shape[0], chunks=(nframes,))\n        multiple_files = False\n\n    # read in data using encoded filenames\n    a = ar.map_blocks(\n        _map_read_frame,\n        chunks=da.core.normalize_chunks(\n            (nframes,) + shape[1:], shape),\n        multiple_files=multiple_files,\n        new_axis=list(range(1, len(shape))),\n        arrayfunc=arrayfunc,\n        meta=arrayfunc([]).astype(dtype),  # meta overwrites `dtype` argument\n    )\n    return a\n\n\ndef _map_read_frame(x, multiple_files, block_info=None, **kwargs):\n\n    fn = x[0]  # get filename from input chunk\n\n    if multiple_files:\n        i, j = 0, 1\n    else:\n        i, j = block_info[None]['array-location'][0]\n\n    return _read_frame(fn=fn, i=slice(i, j), **kwargs)\n\n\ndef _read_frame(fn, i, *, arrayfunc=np.asanyarray):\n    with pims.open(fn) as imgs:\n        return arrayfunc(imgs[i])\n"
  },
  {
    "path": "dask_image/ndfilters/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n__all__ = [\n    \"convolve\",\n    \"correlate\",\n    \"laplace\",\n    \"prewitt\",\n    \"sobel\",\n    \"gaussian\",\n    \"gaussian_filter\",\n    \"gaussian_gradient_magnitude\",\n    \"gaussian_laplace\",\n    \"generic_filter\",\n    \"minimum_filter\",\n    \"median_filter\",\n    \"maximum_filter\",\n    \"rank_filter\",\n    \"percentile_filter\",\n    \"uniform_filter\",\n    \"threshold_local\",\n]\n\nfrom ._conv import convolve, correlate\nfrom ._diff import laplace\nfrom ._edge import prewitt, sobel\nfrom ._gaussian import (gaussian, gaussian_filter,\n                        gaussian_gradient_magnitude, gaussian_laplace)\nfrom ._generic import generic_filter\nfrom ._order import (maximum_filter, median_filter, minimum_filter,\n                     percentile_filter, rank_filter)\nfrom ._smooth import uniform_filter\nfrom ._threshold import threshold_local\n\nconvolve.__module__ = __name__\ncorrelate.__module__ = __name__\n\nlaplace.__module__ = __name__\n\nprewitt.__module__ = __name__\nsobel.__module__ = __name__\n\ngaussian.__module__ = __name__\ngaussian_filter.__module__ = __name__\ngaussian_gradient_magnitude.__module__ = __name__\ngaussian_laplace.__module__ = __name__\n\ngeneric_filter.__module__ = __name__\n\nminimum_filter.__module__ = __name__\nmedian_filter.__module__ = __name__\nmaximum_filter.__module__ = __name__\nrank_filter.__module__ = __name__\npercentile_filter.__module__ = __name__\n\nuniform_filter.__module__ = __name__\n\nthreshold_local.__module__ = __name__\n"
  },
  {
    "path": "dask_image/ndfilters/_conv.py",
    "content": "# -*- coding: utf-8 -*-\nimport scipy.ndimage\n\nfrom ..dispatch._dispatch_ndfilters import (dispatch_convolve,\n                                            dispatch_correlate)\nfrom ..dispatch._utils import check_arraytypes_compatible\nfrom . import _utils\n\n__all__ = [\n    \"convolve\",\n    \"correlate\",\n]\n\n\n@_utils._update_wrapper(scipy.ndimage.convolve)\ndef convolve(image, weights, mode=\"reflect\", cval=0.0, origin=0):\n    check_arraytypes_compatible(image, weights)\n\n    origin = _utils._get_origin(weights.shape, origin)\n    depth = _utils._get_depth(weights.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(image.ndim, depth, \"none\")\n\n    if mode == \"wrap\":  # Fixes https://github.com/dask/dask-image/issues/242\n        boundary = \"periodic\"\n        mode = \"constant\"\n\n    result = image.map_overlap(\n        dispatch_convolve(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        weights=weights,\n        mode=mode,\n        cval=cval,\n        origin=origin,\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.correlate)\ndef correlate(image, weights, mode=\"reflect\", cval=0.0, origin=0):\n    check_arraytypes_compatible(image, weights)\n\n    origin = _utils._get_origin(weights.shape, origin)\n    depth = _utils._get_depth(weights.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(image.ndim, depth, \"none\")\n\n    if mode == \"wrap\":  # Fixes https://github.com/dask/dask-image/issues/242\n        boundary = \"periodic\"\n        mode = \"constant\"\n\n    result = image.map_overlap(\n        dispatch_correlate(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        weights=weights,\n        mode=mode,\n        cval=cval,\n        origin=origin,\n    )\n\n    return result\n"
  },
  {
    "path": "dask_image/ndfilters/_diff.py",
    "content": "# -*- coding: utf-8 -*-\n\n\nimport scipy.ndimage\n\nfrom ..dispatch._dispatch_ndfilters import dispatch_laplace\nfrom . import _utils\n\n__all__ = [\n    \"laplace\",\n]\n\n\n@_utils._update_wrapper(scipy.ndimage.laplace)\ndef laplace(image, mode='reflect', cval=0.0):\n    result = image.map_overlap(\n        dispatch_laplace(image),\n        depth=(image.ndim * (1,)),\n        boundary=\"none\",\n        dtype=image.dtype,\n        meta=image._meta,\n        mode=mode,\n        cval=cval\n    )\n\n    return result\n"
  },
  {
    "path": "dask_image/ndfilters/_edge.py",
    "content": "# -*- coding: utf-8 -*-\n\n\nimport numbers\n\nimport scipy.ndimage\n\nfrom ..dispatch._dispatch_ndfilters import dispatch_prewitt, dispatch_sobel\nfrom . import _utils\n\n__all__ = [\n    \"prewitt\",\n    \"sobel\",\n]\n\n\ndef _validate_axis(ndim, axis):\n    if not isinstance(axis, numbers.Integral):\n        raise ValueError(\"The axis must be of integral type.\")\n    if axis < -ndim or axis >= ndim:\n        raise ValueError(\"The axis is out of range.\")\n\n\n@_utils._update_wrapper(scipy.ndimage.prewitt)\ndef prewitt(image, axis=-1, mode='reflect', cval=0.0):\n    _validate_axis(image.ndim, axis)\n\n    result = image.map_overlap(\n        dispatch_prewitt(image),\n        depth=(image.ndim * (1,)),\n        boundary=\"none\",\n        dtype=image.dtype,\n        meta=image._meta,\n        axis=axis,\n        mode=mode,\n        cval=cval\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.sobel)\ndef sobel(image, axis=-1, mode='reflect', cval=0.0):\n    _validate_axis(image.ndim, axis)\n\n    result = image.map_overlap(\n        dispatch_sobel(image),\n        depth=(image.ndim * (1,)),\n        boundary=\"none\",\n        dtype=image.dtype,\n        meta=image._meta,\n        axis=axis,\n        mode=mode,\n        cval=cval\n    )\n\n    return result\n"
  },
  {
    "path": "dask_image/ndfilters/_gaussian.py",
    "content": "# -*- coding: utf-8 -*-\n\n\nimport numbers\n\nimport numpy as np\nimport scipy.ndimage\n\nfrom ..dispatch._dispatch_ndfilters import (\n    dispatch_gaussian_filter, dispatch_gaussian_gradient_magnitude,\n    dispatch_gaussian_laplace)\nfrom . import _utils\n\n__all__ = [\n    \"gaussian_filter\",\n    \"gaussian_gradient_magnitude\",\n    \"gaussian_laplace\",\n    \"gaussian\"\n]\n\n\ndef _get_sigmas(image, sigma):\n    ndim = image.ndim\n\n    nsigmas = np.array(sigma)\n    if nsigmas.ndim == 0:\n        nsigmas = np.array(ndim * [nsigmas[()]])\n\n    if nsigmas.ndim != 1:\n        raise RuntimeError(\n            \"Must have a single sigma or a single sequence.\"\n        )\n\n    if ndim != len(nsigmas):\n        raise RuntimeError(\n            \"Must have an equal number of sigmas to image dimensions.\"\n        )\n\n    if not issubclass(nsigmas.dtype.type, numbers.Real):\n        raise TypeError(\"Must have real sigmas.\")\n\n    nsigmas = tuple(nsigmas)\n\n    return nsigmas\n\n\ndef _get_border(image, sigma, truncate):\n    sigma = np.array(_get_sigmas(image, sigma))\n\n    if not isinstance(truncate, numbers.Real):\n        raise TypeError(\"Must have a real truncate value.\")\n\n    half_shape = tuple(np.ceil(sigma * truncate).astype(int))\n\n    return half_shape\n\n\n@_utils._update_wrapper(scipy.ndimage.gaussian_filter)\ndef gaussian_filter(image,\n                    sigma,\n                    order=0,\n                    mode='reflect',\n                    cval=0.0,\n                    truncate=4.0):\n    sigma = _get_sigmas(image, sigma)\n    depth = _get_border(image, sigma, truncate)\n\n    depth, boundary = _utils._get_depth_boundary(image.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_gaussian_filter(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        sigma=sigma,\n        order=order,\n        mode=mode,\n        cval=cval,\n        truncate=truncate\n    )\n\n    return result\n\n\ndef gaussian(image,\n             sigma,\n             order=0,\n             mode='reflect',\n             cval=0.0,\n             truncate=4.0):\n    \"\"\"Alias of `dask_image.ndfilters.gaussian_filter`.\"\"\"\n    return gaussian_filter(image,\n                           sigma,\n                           order=order,\n                           mode=mode,\n                           cval=cval,\n                           truncate=truncate)\n\n\n@_utils._update_wrapper(scipy.ndimage.gaussian_gradient_magnitude)\ndef gaussian_gradient_magnitude(image,\n                                sigma,\n                                mode='reflect',\n                                cval=0.0,\n                                truncate=4.0,\n                                **kwargs):\n    sigma = _get_sigmas(image, sigma)\n    depth = _get_border(image, sigma, truncate)\n\n    depth, boundary = _utils._get_depth_boundary(image.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_gaussian_gradient_magnitude(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        sigma=sigma,\n        mode=mode,\n        cval=cval,\n        truncate=truncate,\n        **kwargs\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.gaussian_laplace)\ndef gaussian_laplace(image,\n                     sigma,\n                     mode='reflect',\n                     cval=0.0,\n                     truncate=4.0,\n                     **kwargs):\n    sigma = _get_sigmas(image, sigma)\n    depth = _get_border(image, sigma, truncate)\n\n    depth, boundary = _utils._get_depth_boundary(image.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_gaussian_laplace(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        sigma=sigma,\n        mode=mode,\n        cval=cval,\n        truncate=truncate,\n        **kwargs\n    )\n\n    return result\n"
  },
  {
    "path": "dask_image/ndfilters/_generic.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy.ndimage\n\nfrom ..dispatch._dispatch_ndfilters import dispatch_generic_filter\nfrom . import _utils\n\n__all__ = [\n    \"generic_filter\",\n]\n\n\n@_utils._update_wrapper(scipy.ndimage.generic_filter)\ndef generic_filter(image,\n                   function,\n                   size=None,\n                   footprint=None,\n                   mode='reflect',\n                   cval=0.0,\n                   origin=0,\n                   extra_arguments=tuple(),\n                   extra_keywords=dict()):\n    footprint = _utils._get_footprint(image.ndim, size, footprint)\n    origin = _utils._get_origin(footprint.shape, origin)\n    depth = _utils._get_depth(footprint.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, \"none\")\n\n    if type(image._meta) is np.ndarray:\n        kwargs = {\"extra_arguments\": extra_arguments,\n                  \"extra_keywords\": extra_keywords}\n    else:  # pragma: no cover\n        # cupy generic_filter doesn't support extra_arguments or extra_keywords\n        kwargs = {}\n\n    result = image.map_overlap(\n        dispatch_generic_filter(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        function=function,\n        footprint=footprint,\n        mode=mode,\n        cval=cval,\n        origin=origin,\n        **kwargs\n    )\n\n    return result\n"
  },
  {
    "path": "dask_image/ndfilters/_order.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport scipy.ndimage\n\nfrom ..dispatch._dispatch_ndfilters import (dispatch_maximum_filter,\n                                            dispatch_median_filter,\n                                            dispatch_minimum_filter,\n                                            dispatch_percentile_filter,\n                                            dispatch_rank_filter)\nfrom . import _utils\n\n__all__ = [\n    \"minimum_filter\",\n    \"median_filter\",\n    \"maximum_filter\",\n    \"rank_filter\",\n    \"percentile_filter\",\n]\n\n\n@_utils._update_wrapper(scipy.ndimage.minimum_filter)\ndef minimum_filter(image,\n                   size=None,\n                   footprint=None,\n                   mode='reflect',\n                   cval=0.0,\n                   origin=0):\n    footprint = _utils._get_footprint(image.ndim, size, footprint)\n    origin = _utils._get_origin(footprint.shape, origin)\n    depth = _utils._get_depth(footprint.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_minimum_filter(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        footprint=footprint,\n        mode=mode,\n        cval=cval,\n        origin=origin\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.median_filter)\ndef median_filter(image,\n                  size=None,\n                  footprint=None,\n                  mode='reflect',\n                  cval=0.0,\n                  origin=0):\n    footprint = _utils._get_footprint(image.ndim, size, footprint)\n    origin = _utils._get_origin(footprint.shape, origin)\n    depth = _utils._get_depth(footprint.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_median_filter(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        footprint=footprint,\n        mode=mode,\n        cval=cval,\n        origin=origin\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.maximum_filter)\ndef maximum_filter(image,\n                   size=None,\n                   footprint=None,\n                   mode='reflect',\n                   cval=0.0,\n                   origin=0):\n    footprint = _utils._get_footprint(image.ndim, size, footprint)\n    origin = _utils._get_origin(footprint.shape, origin)\n    depth = _utils._get_depth(footprint.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_maximum_filter(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        footprint=footprint,\n        mode=mode,\n        cval=cval,\n        origin=origin\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.rank_filter)\ndef rank_filter(image,\n                rank,\n                size=None,\n                footprint=None,\n                mode='reflect',\n                cval=0.0,\n                origin=0):\n    footprint = _utils._get_footprint(image.ndim, size, footprint)\n    origin = _utils._get_origin(footprint.shape, origin)\n    depth = _utils._get_depth(footprint.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_rank_filter(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        rank=rank,\n        footprint=footprint,\n        mode=mode,\n        cval=cval,\n        origin=origin\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.percentile_filter)\ndef percentile_filter(image,\n                      percentile,\n                      size=None,\n                      footprint=None,\n                      mode='reflect',\n                      cval=0.0,\n                      origin=0):\n    footprint = _utils._get_footprint(image.ndim, size, footprint)\n    origin = _utils._get_origin(footprint.shape, origin)\n    depth = _utils._get_depth(footprint.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_percentile_filter(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        percentile=percentile,\n        footprint=footprint,\n        mode=mode,\n        cval=cval,\n        origin=origin\n    )\n\n    return result\n"
  },
  {
    "path": "dask_image/ndfilters/_smooth.py",
    "content": "# -*- coding: utf-8 -*-\n\n\nimport scipy.ndimage\n\nfrom ..dispatch._dispatch_ndfilters import dispatch_uniform_filter\nfrom . import _utils\nfrom ._gaussian import gaussian_filter\n\n__all__ = [\n    \"uniform_filter\",\n]\n\ngaussian_filter = gaussian_filter\n\n\n@_utils._update_wrapper(scipy.ndimage.uniform_filter)\ndef uniform_filter(image,\n                   size=3,\n                   mode='reflect',\n                   cval=0.0,\n                   origin=0):\n    size = _utils._get_size(image.ndim, size)\n    depth = _utils._get_depth(size, origin)\n\n    depth, boundary = _utils._get_depth_boundary(image.ndim, depth, \"none\")\n\n    result = image.map_overlap(\n        dispatch_uniform_filter(image),\n        depth=depth,\n        boundary=boundary,\n        dtype=image.dtype,\n        meta=image._meta,\n        size=size,\n        mode=mode,\n        cval=cval,\n        origin=origin\n    )\n\n    return result\n"
  },
  {
    "path": "dask_image/ndfilters/_threshold.py",
    "content": "import numpy as np\n\nfrom ..dispatch._dispatch_ndfilters import dispatch_threshold_local_mean\nfrom . import _gaussian, _generic, _order\n\n__all__ = [\n    \"threshold_local\",\n]\n\n\ndef threshold_local(image, block_size, method='gaussian', offset=0,\n                    mode='reflect', param=None, cval=0):\n    \"\"\"Compute a threshold mask image based on local pixel neighborhood.\n\n    Also known as adaptive or dynamic thresholding[1]_. The threshold value is\n    the weighted mean for the local neighborhood of a pixel subtracted by a\n    constant. Alternatively the threshold can be determined dynamically by a\n    given function, using the 'generic' method.\n\n    Parameters\n    ----------\n    image : (N, M) dask ndarray\n        Input image.\n    block_size : int or list/tuple/array\n        Size of pixel neighborhood which is used to calculate the\n        threshold value.\n        (1) A single value for use in all dimensions or\n        (2) A tuple, list, or array with length equal to image.ndim\n    method : {'generic', 'gaussian', 'mean', 'median'}, optional\n        Method used to determine adaptive threshold for local neighbourhood in\n        weighted mean image.\n\n        * 'generic': use custom function (see `param` parameter)\n        * 'gaussian': apply gaussian filter (see `param` parameter for custom\\\n                      sigma value)\n        * 'mean': apply arithmetic mean filter\n        * 'median': apply median rank filter\n\n        By default the 'gaussian' method is used.\n    offset : float, optional\n        Constant subtracted from weighted mean of neighborhood to calculate\n        the local threshold value. Default offset is 0.\n    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n        The mode parameter determines how the array borders are handled, where\n        cval is the value when mode is equal to 'constant'.\n        Default is 'reflect'.\n    param : {int, function}, optional\n        Either specify sigma for 'gaussian' method or function object for\n        'generic' method. This functions takes the flat array of local\n        neighbourhood as a single argument and returns the calculated\n        threshold for the centre pixel.\n    cval : float, optional\n        Value to fill past edges of input if mode is 'constant'.\n\n    Returns\n    -------\n    threshold : (N, M) dask ndarray\n        Threshold image. All pixels in the input image higher than the\n        corresponding pixel in the threshold image are considered foreground.\n\n    References\n    ----------\n    .. [1] https://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold\n\n    Examples\n    --------\n    >>> import dask.array as da\n    >>> image = da.random.random((1000, 1000), chunks=(100, 100))\n    >>> result = threshold_local(image, 15, 'gaussian')\n    \"\"\"  # noqa\n\n    image = image.astype(np.float64)\n\n    if method == 'generic':\n        if not callable(param):\n            raise ValueError(\"Must include a valid function to use as the \"\n                             \"'param' keyword argument.\")\n        thresh_image = _generic.generic_filter(image, param, block_size,\n                                               mode=mode, cval=cval)\n    elif method == 'gaussian':\n        if param is None:\n            sigma = (np.array(block_size).astype(float) - 1) / 6.0\n        else:\n            sigma = param\n        thresh_image = _gaussian.gaussian_filter(image, sigma, mode=mode,\n                                                 cval=cval)\n    elif method == 'mean':\n        thresh_image = _generic.generic_filter(\n            image, dispatch_threshold_local_mean(image), block_size, mode=mode,\n            cval=cval)\n    elif method == 'median':\n        thresh_image = _order.median_filter(image, block_size, mode=mode,\n                                            cval=cval)\n    else:\n        raise ValueError(\"Invalid method specified. Please use `generic`, \"\n                         \"`gaussian`, `mean`, or `median`.\")\n    return thresh_image - offset\n"
  },
  {
    "path": "dask_image/ndfilters/_utils.py",
    "content": "# -*- coding: utf-8 -*-\nimport collections\nimport inspect\nimport numbers\nimport re\n\nimport numpy as np\n\n\ndef _get_docstring(func):\n    # Drop the output parameter from the docstring.\n    split_doc_params = lambda s: re.subn(                         # noqa: E731\n        \"(    [A-Za-z]+ : )\", \"\\0\\\\1\", s)[0].split(\"\\0\")\n    drop_doc_param = lambda s: not s.startswith(\"    output : \")  # noqa: E731\n    func_doc = \"\" if func.__doc__ is None else func.__doc__\n    cleaned_docstring = \"\".join([\n        l for l in split_doc_params(func_doc) if drop_doc_param(l)  # noqa: E741, E501\n    ])\n    cleaned_docstring = cleaned_docstring.replace('input', 'image')\n    cleaned_docstring = cleaned_docstring.replace('labels', 'label_image')\n    cleaned_docstring = cleaned_docstring.split('Examples')[0].strip()\n\n    docstring = \"\"\"\n    Wrapped copy of \"{mod_name}.{func_name}\"\n\n\n    Excludes the output parameter as it would not work with Dask arrays.\n\n\n    Original docstring:\n\n    {doc}\n    \"\"\".format(\n        mod_name=inspect.getmodule(func).__name__,\n        func_name=func.__name__,\n        doc=cleaned_docstring,\n    )\n\n    return docstring\n\n\ndef _update_wrapper(func):\n    def _updater(wrapper):\n        wrapper.__name__ = func.__name__\n        wrapper.__doc__ = _get_docstring(func)\n        return wrapper\n\n    return _updater\n\n\ndef _get_depth_boundary(ndim, depth, boundary=None):\n    strlike = (bytes, str)\n\n    if not isinstance(ndim, numbers.Integral):\n        raise TypeError(\"Expected integer value for `ndim`.\")\n    if ndim <= 0:\n        raise ValueError(\"Expected positive value for `ndim`.\")\n\n    if isinstance(depth, numbers.Number):\n        depth = ndim * (depth,)\n    if not isinstance(depth, collections.abc.Sized):\n        raise TypeError(\"Unexpected type for `depth`.\")\n    if len(depth) != ndim:\n        raise ValueError(\"Expected `depth` to have a length equal to `ndim`.\")\n    if isinstance(depth, collections.abc.Sequence):\n        depth = dict(zip(range(ndim), depth))\n    if not isinstance(depth, collections.abc.Mapping):\n        raise TypeError(\"Unexpected type for `depth`.\")\n\n    if not all(map(lambda d: isinstance(d, numbers.Integral), depth.values())):\n        raise TypeError(\"Expected integer values for `depth`.\")\n    if not all(map(lambda d: d >= 0, depth.values())):\n        raise ValueError(\"Expected positive semidefinite values for `depth`.\")\n\n    depth = dict([(a, int(d)) for a, d in depth.items()])\n\n    if (boundary is None) or isinstance(boundary, strlike):\n        boundary = ndim * (boundary,)\n    if not isinstance(boundary, collections.abc.Sized):\n        raise TypeError(\"Unexpected type for `boundary`.\")\n    if len(boundary) != ndim:\n        raise ValueError(\n            \"Expected `boundary` to have a length equal to `ndim`.\"\n        )\n    if isinstance(boundary, collections.abc.Sequence):\n        boundary = dict(zip(range(ndim), boundary))\n    if not isinstance(boundary, collections.abc.Mapping):\n        raise TypeError(\"Unexpected type for `boundary`.\")\n\n    type_check = lambda b: (b is None) or isinstance(b, strlike)  # noqa: E731\n    if not all(map(type_check, boundary.values())):\n        raise TypeError(\"Expected string-like values for `boundary`.\")\n\n    return depth, boundary\n\n\ndef _get_size(ndim, size):\n    if not isinstance(ndim, numbers.Integral):\n        raise TypeError(\"The ndim must be of integral type.\")\n\n    if isinstance(size, numbers.Number):\n        size = ndim * (size,)\n    size = np.array(size)\n\n    if size.ndim != 1:\n        raise RuntimeError(\"The size must have only one dimension.\")\n    if len(size) != ndim:\n        raise RuntimeError(\n            \"The size must have a length equal to the number of dimensions.\"\n        )\n    if not issubclass(size.dtype.type, numbers.Integral):\n        raise TypeError(\"The size must be of integral type.\")\n\n    size = tuple(size)\n\n    return size\n\n\ndef _get_origin(size, origin=0):\n    size = np.array(size)\n    ndim = len(size)\n\n    if isinstance(origin, numbers.Number):\n        origin = ndim * (origin,)\n\n    origin = np.array(origin)\n\n    if not issubclass(origin.dtype.type, numbers.Integral):\n        raise TypeError(\"The origin must be of integral type.\")\n\n    # Validate dimensions.\n    if origin.ndim != 1:\n        raise RuntimeError(\"The origin must have only one dimension.\")\n    if len(origin) != ndim:\n        raise RuntimeError(\n            \"The origin must have the same length as the number of dimensions\"\n            \" as the array being filtered.\"\n        )\n\n    # Validate origin is bounded.\n    if not (origin < ((size + 1) // 2)).all():\n        raise ValueError(\"The origin must be within the footprint.\")\n\n    origin = tuple(origin)\n\n    return origin\n\n\ndef _get_depth(size, origin=0):\n    origin = np.array(_get_origin(size, origin))\n    size = np.array(size)\n\n    half_size = size // 2\n    depth = half_size + abs(origin)\n\n    depth = tuple(depth)\n\n    return depth\n\n\ndef _get_footprint(ndim, size=None, footprint=None):\n    # Verify that we only got size or footprint.\n    if size is None and footprint is None:\n        raise RuntimeError(\"Must provide either size or footprint.\")\n    if size is not None and footprint is not None:\n        raise RuntimeError(\"Provide either size or footprint, but not both.\")\n\n    # Get a footprint based on the size.\n    if size is not None:\n        size = _get_size(ndim, size)\n        footprint = np.ones(size, dtype=bool)\n\n    # Validate the footprint.\n    if footprint.ndim != ndim:\n        raise RuntimeError(\n            \"The footprint must have the same number of dimensions as\"\n            \" the array being filtered.\"\n        )\n    if footprint.size == 0:\n        raise RuntimeError(\"The footprint must have only non-zero dimensions.\")\n\n    # Convert to Boolean.\n    footprint = (footprint != 0)\n\n    return footprint\n"
  },
  {
    "path": "dask_image/ndfourier/__init__.py",
    "content": "# -*- coding: utf-8 -*-\nimport numbers\n\nimport dask.array as da\n\nfrom . import _utils\n\n__all__ = [\n    \"fourier_gaussian\",\n    \"fourier_shift\",\n    \"fourier_uniform\",\n]\n\n\ndef fourier_gaussian(image, sigma, n=-1, axis=-1):\n    \"\"\"\n    Multi-dimensional Gaussian fourier filter.\n\n    The array is multiplied with the fourier transform of a Gaussian\n    kernel.\n\n    Parameters\n    ----------\n    image : array_like\n        The input image.\n    sigma : float or sequence\n        The sigma of the Gaussian kernel. If a float, `sigma` is the same for\n        all axes. If a sequence, `sigma` has to contain one value for each\n        axis.\n    n : int, optional\n        If `n` is negative (default), then the image is assumed to be the\n        result of a complex fft.\n        If `n` is larger than or equal to zero, the image is assumed to be the\n        result of a real fft, and `n` gives the length of the array before\n        transformation along the real transform direction.\n    axis : int, optional\n        The axis of the real transform.\n\n    Returns\n    -------\n    fourier_gaussian : Dask Array\n\n    Examples\n    --------\n    >>> from scipy import ndimage, misc\n    >>> import numpy.fft\n    >>> import matplotlib.pyplot as plt\n    >>> fig, (ax1, ax2) = plt.subplots(1, 2)\n    >>> plt.gray()  # show the filtered result in grayscale\n    >>> ascent = misc.ascent()\n    >>> image = numpy.fft.fft2(ascent)\n    >>> result = ndimage.fourier_gaussian(image, sigma=4)\n    >>> result = numpy.fft.ifft2(result)\n    >>> ax1.imshow(ascent)\n    \"\"\"\n\n    # Validate and normalize arguments\n    image, sigma, n, axis = _utils._norm_args(image, sigma, n=n, axis=axis)\n\n    # Compute frequencies\n    ang_freq_grid = _utils._get_ang_freq_grid(\n        image.shape,\n        chunks=image.chunks,\n        n=n,\n        axis=axis,\n        dtype=sigma.dtype\n    )\n\n    # Compute Fourier transformed Gaussian\n    result = 
image.copy()\n    scale = (sigma ** 2) / -2\n\n    for ax, f in enumerate(ang_freq_grid):\n        f *= f\n        gaussian = da.exp(scale[ax] * f)\n        gaussian = _utils._reshape_nd(gaussian, ndim=image.ndim, axis=ax)\n        result *= gaussian\n\n    return result\n\n\ndef fourier_shift(image, shift, n=-1, axis=-1):\n    \"\"\"\n    Multi-dimensional fourier shift filter.\n\n    The array is multiplied with the fourier transform of a shift operation.\n\n    Parameters\n    ----------\n    image : array_like\n        The input image.\n    shift : float or sequence\n        The size of the box used for filtering.\n        If a float, `shift` is the same for all axes. If a sequence, `shift`\n        has to contain one value for each axis.\n    n : int, optional\n        If `n` is negative (default), then the image is assumed to be the\n        result of a complex fft.\n        If `n` is larger than or equal to zero, the image is assumed to be the\n        result of a real fft, and `n` gives the length of the array before\n        transformation along the real transform direction.\n    axis : int, optional\n        The axis of the real transform.\n\n    Returns\n    -------\n    fourier_shift : Dask Array\n\n    Examples\n    --------\n    >>> from scipy import ndimage, misc\n    >>> import matplotlib.pyplot as plt\n    >>> import numpy.fft\n    >>> fig, (ax1, ax2) = plt.subplots(1, 2)\n    >>> plt.gray()  # show the filtered result in grayscale\n    >>> ascent = misc.ascent()\n    >>> image = numpy.fft.fft2(ascent)\n    >>> result = ndimage.fourier_shift(image, shift=200)\n    >>> result = numpy.fft.ifft2(result)\n    >>> ax1.imshow(ascent)\n    >>> ax2.imshow(result.real)  # the imaginary part is an artifact\n    >>> plt.show()\n    \"\"\"\n\n    if issubclass(image.dtype.type, numbers.Real):\n        image = image.astype(complex)\n\n    # Validate and normalize arguments\n    image, shift, n, axis = _utils._norm_args(image, shift, n=n, axis=axis)\n\n    # 
Constants with type converted\n    J = image.dtype.type(1j)\n\n    # Get the grid of frequencies\n    ang_freq_grid = _utils._get_ang_freq_grid(\n        image.shape,\n        chunks=image.chunks,\n        n=n,\n        axis=axis,\n        dtype=shift.dtype\n    )\n\n    # Apply shift\n    result = image.copy()\n    for ax, f in enumerate(ang_freq_grid):\n        phase_shift = da.exp((-J) * shift[ax] * f)\n        phase_shift = _utils._reshape_nd(phase_shift, ndim=image.ndim, axis=ax)\n        result *= phase_shift\n\n    return result\n\n\ndef fourier_uniform(image, size, n=-1, axis=-1):\n    \"\"\"\n    Multi-dimensional uniform fourier filter.\n\n    The array is multiplied with the fourier transform of a box of given\n    size.\n\n    Parameters\n    ----------\n    image : array_like\n        The input image.\n    size : float or sequence\n        The size of the box used for filtering.\n        If a float, `size` is the same for all axes. If a sequence, `size` has\n        to contain one value for each axis.\n    n : int, optional\n        If `n` is negative (default), then the image is assumed to be the\n        result of a complex fft.\n        If `n` is larger than or equal to zero, the image is assumed to be the\n        result of a real fft, and `n` gives the length of the array before\n        transformation along the real transform direction.\n    axis : int, optional\n        The axis of the real transform.\n\n    Returns\n    -------\n    fourier_uniform : Dask Array\n        The filtered image. 
\n\n    Examples\n    --------\n    >>> from scipy import ndimage, misc\n    >>> import numpy.fft\n    >>> import matplotlib.pyplot as plt\n    >>> fig, (ax1, ax2) = plt.subplots(1, 2)\n    >>> plt.gray()  # show the filtered result in grayscale\n    >>> ascent = misc.ascent()\n    >>> image = numpy.fft.fft2(ascent)\n    >>> result = ndimage.fourier_uniform(image, size=20)\n    >>> result = numpy.fft.ifft2(result)\n    >>> ax1.imshow(ascent)\n    >>> ax2.imshow(result.real)  # the imaginary part is an artifact\n    >>> plt.show()\n    \"\"\"\n\n    # Validate and normalize arguments\n    image, size, n, axis = _utils._norm_args(image, size, n=n, axis=axis)\n\n    # Get the grid of frequencies\n    freq_grid = _utils._get_freq_grid(\n        image.shape,\n        chunks=image.chunks,\n        n=n,\n        axis=axis,\n        dtype=size.dtype\n    )\n\n    # Compute uniform filter\n    result = image.copy()\n    for ax, f in enumerate(freq_grid):\n        uniform = da.sinc(size[ax] * f)\n        uniform = _utils._reshape_nd(uniform, ndim=image.ndim, axis=ax)\n        result *= uniform\n\n    return result\n"
  },
  {
    "path": "dask_image/ndfourier/_utils.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport numbers\n\nimport dask.array as da\nimport numpy as np\n\n\ndef _get_freq_grid(shape, chunks, axis, n, dtype=float):\n    assert len(shape) == len(chunks)\n\n    shape = tuple(shape)\n    dtype = np.dtype(dtype).type\n\n    assert (issubclass(dtype, numbers.Real) and\n            not issubclass(dtype, numbers.Integral))\n\n    axis = axis % len(shape)\n\n    freq_grid = []\n    for ax, (s, c) in enumerate(zip(shape, chunks)):\n        if axis == ax and n > 0:\n            f = da.fft.rfftfreq(n, chunks=c).astype(dtype)\n        else:\n            f = da.fft.fftfreq(s, chunks=c).astype(dtype)\n        freq_grid.append(f)\n\n    freq_grid = da.meshgrid(*freq_grid, indexing=\"ij\", sparse=True)\n\n    return freq_grid\n\n\ndef _get_ang_freq_grid(shape, chunks, axis, n, dtype=float):\n    dtype = np.dtype(dtype).type\n\n    assert (issubclass(dtype, numbers.Real) and\n            not issubclass(dtype, numbers.Integral))\n\n    pi = dtype(np.pi)\n\n    freq_grid = _get_freq_grid(shape, chunks, axis, n, dtype=dtype)\n    ang_freq_grid = tuple((2 * pi) * f for f in freq_grid)\n\n    return ang_freq_grid\n\n\ndef _norm_args(a, s, n=-1, axis=-1):\n    if issubclass(a.dtype.type, numbers.Integral):\n        a = a.astype(float)\n\n    if isinstance(s, numbers.Number):\n        s = np.array(a.ndim * [s])\n    elif not isinstance(s, da.Array):\n        s = np.array(s)\n\n    if issubclass(s.dtype.type, numbers.Integral):\n        s = s.astype(a.real.dtype)\n    elif not issubclass(s.dtype.type, numbers.Real):\n        raise TypeError(\"The `s` must contain real value(s).\")\n    if s.shape != (a.ndim,):\n        raise RuntimeError(\n            \"Shape of `s` must be 1-D and equal to the input's rank.\"\n        )\n\n    if n != -1 and a.shape[axis] != (n // 2 + 1):\n        raise NotImplementedError(\n            \"In the case of real-valued images, it is required that \"\n            \"(n // 2 + 1) == image.shape[axis].\"\n     
   )\n\n    return (a, s, n, axis)\n\n\ndef _reshape_nd(arr, ndim, axis):\n    \"\"\"Promote a 1d array to ndim with non-singleton size along axis.\"\"\"\n    nd_shape = (1,) * axis + (arr.size,) + (1,) * (ndim - axis - 1)\n    return arr.reshape(nd_shape)\n"
  },
  {
    "path": "dask_image/ndinterp/__init__.py",
    "content": "__all__ = [\n    \"affine_transform\",\n    \"map_coordinates\",\n    \"rotate\",\n    \"spline_filter\",\n    \"spline_filter1d\",\n]\n\nfrom ._affine_transform import affine_transform\nfrom ._map_coordinates import map_coordinates\nfrom ._rotate import rotate\nfrom ._spline_filters import spline_filter, spline_filter1d\n\naffine_transform.__module__ = __name__\nmap_coordinates.__module__ = __name__\nrotate.__module__ = __name__\nspline_filter.__module__ = __name__\nspline_filter1d.__module__ = __name__\n"
  },
  {
    "path": "dask_image/ndinterp/_affine_transform.py",
    "content": "# -*- coding: utf-8 -*-\nfrom itertools import product\n\nimport dask.array as da\nimport numpy as np\nfrom dask.base import tokenize\nfrom dask.highlevelgraph import HighLevelGraph\nimport scipy\nfrom scipy.ndimage import affine_transform as ndimage_affine_transform\n\nfrom ._spline_filters import spline_filter\nfrom ..dispatch._dispatch_ndinterp import (\n    dispatch_affine_transform,\n    dispatch_asarray,\n)\n\n\ndef affine_transform(\n        image,\n        matrix,\n        offset=0.0,\n        output_shape=None,\n        order=1,\n        output_chunks=None,\n        **kwargs\n):\n    \"\"\"Apply an affine transform using Dask.\n\n    For every output chunk, only the slice containing the relevant part\n    of the image is processed. Chunkwise processing is performed\n    either using `ndimage.affine_transform` or\n    `cupyx.scipy.ndimage.affine_transform`, depending on the input type.\n\n    Notes\n    -----\n    Differences to `ndimage.affine_transformation`:\n        - currently, prefiltering is not supported\n          (affecting the output in case of interpolation `order > 1`)\n        - default order is 1\n        - modes 'reflect', 'mirror' and 'wrap' are not supported\n\n    Arguments equal to `ndimage.affine_transformation`,\n    except for `output_chunks`.\n\n    Parameters\n    ----------\n    image : array_like (Numpy Array, Cupy Array, Dask Array...)\n        The image array.\n    matrix : array (ndim,), (ndim, ndim), (ndim, ndim+1) or (ndim+1, ndim+1)\n        Transformation matrix.\n    offset : float or sequence, optional\n        The offset into the array where the transform is applied. If a float,\n        `offset` is the same for each axis. If a sequence, `offset` should\n        contain one value for each axis.\n    output_shape : tuple of ints, optional\n        The shape of the array to be returned.\n    order : int, optional\n        The order of the spline interpolation. 
Note that for order>1\n        scipy's affine_transform applies prefiltering, which is not\n        yet supported and skipped in this implementation.\n    output_chunks : tuple of ints, optional\n        The shape of the chunks of the output Dask Array.\n\n    Returns\n    -------\n    affine_transform : Dask Array\n        A dask array representing the transformed output\n\n    \"\"\"\n\n    if not isinstance(image, da.core.Array):\n        image = da.from_array(image)\n\n    if output_shape is None:\n        output_shape = image.shape\n\n    if output_chunks is None:\n        output_chunks = image.shape\n\n    # Perform test run to ensure parameter validity.\n    ndimage_affine_transform(np.zeros([0] * image.ndim),\n                             matrix,\n                             offset)\n\n    # Make sure parameters contained in matrix and offset\n    # are not overlapping, i.e. that the offset is valid as\n    # it needs to be modified for each chunk.\n    # Further parameter checks are performed directly by\n    # `ndimage.affine_transform`.\n\n    matrix = np.asarray(matrix)\n    offset = np.asarray(offset).squeeze()\n\n    # these lines were copied and adapted from `ndimage.affine_transform`\n    if (matrix.ndim == 2 and matrix.shape[1] == image.ndim + 1 and\n            (matrix.shape[0] in [image.ndim, image.ndim + 1])):\n\n        # assume input is homogeneous coordinate transformation matrix\n        offset = matrix[:image.ndim, image.ndim]\n        matrix = matrix[:image.ndim, :image.ndim]\n\n    cval = kwargs.pop('cval', 0)\n    mode = kwargs.pop('mode', 'constant')\n    prefilter = kwargs.pop('prefilter', False)\n\n    supported_modes = ['constant', 'nearest']\n    if scipy.__version__ > np.lib.NumpyVersion('1.6.0'):\n        supported_modes += ['grid-constant']\n    if mode in ['wrap', 'reflect', 'mirror', 'grid-mirror', 'grid-wrap']:\n        raise NotImplementedError(\n            f\"Mode {mode} is not currently supported. 
It must be one of \"\n            f\"{supported_modes}.\")\n\n    # process kwargs\n    if prefilter and order > 1:\n        # prefilter is not yet supported for all modes\n        if mode in ['nearest', 'grid-constant']:\n            raise NotImplementedError(\n                f\"order > 1 with mode='{mode}' is not supported. Currently \"\n                f\"prefilter is only supported with mode='constant'.\"\n            )\n        image = spline_filter(image, order, output=np.float64,\n                              mode=mode)\n\n    n = image.ndim\n    image_shape = image.shape\n\n    # calculate output array properties\n    normalized_chunks = da.core.normalize_chunks(output_chunks,\n                                                 tuple(output_shape))\n    block_indices = product(*(range(len(bds)) for bds in normalized_chunks))\n    block_offsets = [np.cumsum((0,) + bds[:-1]) for bds in normalized_chunks]\n\n    # use dispatching mechanism to determine backend\n    affine_transform_method = dispatch_affine_transform(image)\n    asarray_method = dispatch_asarray(image)\n\n    # construct dask graph for output array\n    # using unique and deterministic identifier\n    output_name = 'affine_transform-' + tokenize(image, matrix, offset,\n                                                 output_shape, output_chunks,\n                                                 kwargs)\n    output_layer = {}\n    rel_images = []\n    for ib, block_ind in enumerate(block_indices):\n\n        out_chunk_shape = [normalized_chunks[dim][block_ind[dim]]\n                           for dim in range(n)]\n        out_chunk_offset = [block_offsets[dim][block_ind[dim]]\n                            for dim in range(n)]\n\n        out_chunk_edges = np.array([i for i in np.ndindex(tuple([2] * n))])\\\n            * np.array(out_chunk_shape) + np.array(out_chunk_offset)\n\n        # map output chunk edges onto input image coordinates\n        # to define the input region relevant for the 
current chunk\n        if matrix.ndim == 1 and len(matrix) == image.ndim:\n            rel_image_edges = matrix * out_chunk_edges + offset\n        else:\n            rel_image_edges = np.dot(matrix, out_chunk_edges.T).T + offset\n\n        rel_image_i = np.min(rel_image_edges, 0)\n        rel_image_f = np.max(rel_image_edges, 0)\n\n        # Calculate edge coordinates required for the footprint of the\n        # spline kernel according to\n        # https://github.com/scipy/scipy/blob/9c0d08d7d11fc33311a96d2ac3ad73c8f6e3df00/scipy/ndimage/src/ni_interpolation.c#L412-L419 # noqa: E501\n        # Also see this discussion:\n        # https://github.com/dask/dask-image/issues/24#issuecomment-706165593 # noqa: E501\n        for dim in range(n):\n\n            if order % 2 == 0:\n                rel_image_i[dim] += 0.5\n                rel_image_f[dim] += 0.5\n\n            rel_image_i[dim] = np.floor(rel_image_i[dim]) - order // 2\n            rel_image_f[dim] = np.floor(rel_image_f[dim]) - order // 2 + order\n\n            if order == 0:  # required for consistency with scipy.ndimage\n                rel_image_i[dim] -= 1\n\n        # clip image coordinates to image extent\n        for dim, s in zip(range(n), image_shape):\n            rel_image_i[dim] = np.clip(rel_image_i[dim], 0, s - 1)\n            rel_image_f[dim] = np.clip(rel_image_f[dim], 0, s - 1)\n\n        rel_image_slice = tuple([slice(int(rel_image_i[dim]),\n                                       int(rel_image_f[dim]) + 2)\n                                 for dim in range(n)])\n\n        rel_image = image[rel_image_slice]\n\n        \"\"\"Block comment for future developers explaining how `offset` is\n        transformed into `offset_prime` for each output chunk.\n        Modify offset to point into cropped image.\n        y = Mx + o\n        Coordinate substitution:\n        y' = y - y0(min_coord_px)\n        x' = x - x0(chunk_offset)\n        Then:\n        y' = Mx' + o + Mx0 - y0\n        M' = M\n     
   o' = o + Mx0 - y0\n        \"\"\"\n\n        offset_prime = offset + np.dot(matrix, out_chunk_offset) - rel_image_i\n\n        output_layer[(output_name,) + block_ind] = (\n                        affine_transform_method,\n                        (da.core.concatenate3, rel_image.__dask_keys__()),\n                        asarray_method(matrix),\n                        offset_prime,\n                        tuple(out_chunk_shape),  # output_shape\n                        None,  # out\n                        order,\n                        mode,\n                        cval,\n                        False  # prefilter\n        )\n\n        rel_images.append(rel_image)\n\n    graph = HighLevelGraph.from_collections(output_name, output_layer,\n                                            dependencies=[image] + rel_images)\n\n    meta = dispatch_asarray(image)([0]).astype(image.dtype)\n\n    transformed = da.Array(graph,\n                           output_name,\n                           shape=tuple(output_shape),\n                           # chunks=output_chunks,\n                           chunks=normalized_chunks,\n                           meta=meta)\n\n    return transformed\n"
  },
  {
    "path": "dask_image/ndinterp/_map_coordinates.py",
    "content": "# -*- coding: utf-8 -*-\n\nfrom dask import delayed\nimport dask.array as da\nimport numpy as np\nfrom dask.base import tokenize\nfrom scipy.ndimage import map_coordinates as ndimage_map_coordinates\nfrom scipy.ndimage import labeled_comprehension as\\\n    ndimage_labeled_comprehension\n\nfrom ..dispatch._utils import get_type\n\n\ndef _map_single_coordinates_array_chunk(\n        input, coordinates, order=3, mode='constant',\n        cval=0.0, prefilter=False):\n    \"\"\"\n    Central helper function for implementing map_coordinates.\n\n    Receives 'input' as a dask array and computed coordinates.\n\n    Implementation details and steps:\n    1) associate each coordinate in coordinates with the chunk\n       it maps to in the input\n    2) for each input chunk that has been associated to at least one\n       coordinate, calculate the minimal slice required to map\n       all coordinates that are associated to it (note that resulting slice\n       coordinates can lie outside of the coordinate's chunk)\n    3) for each previously obtained slice and its associated coordinates,\n       define a dask task and apply ndimage.map_coordinates\n    4) outputs of ndimage.map_coordinates are rearranged to match input order\n    \"\"\"\n\n    # STEP 1: Associate each coordinate in coordinates with\n    # the chunk it maps to in the input array\n\n    # get the input chunks each coordinate maps onto\n    coords_input_chunk_locations = coordinates.T // np.array(input.chunksize)\n\n    # map out-of-bounds chunk locations to valid input chunks\n    coords_input_chunk_locations = np.clip(\n        coords_input_chunk_locations, 0, np.array(input.numblocks) - 1\n    )\n\n    # all input chunk locations\n    input_chunk_locations = np.array([i for i in np.ndindex(input.numblocks)])\n\n    # linearize input chunk locations\n    coords_input_chunk_locations_linear = np.sum(\n        coords_input_chunk_locations * np.array(\n            
[np.prod(input.numblocks[:dim])\n                for dim in range(input.ndim)])[::-1],\n        axis=1, dtype=np.int64)\n\n    # determine the input chunks that have coords associated and\n    # count how many coords map onto each input chunk\n    chunk_indices_count = np.bincount(coords_input_chunk_locations_linear,\n                                      minlength=np.prod(input.numblocks))\n    required_input_chunk_indices = np.where(chunk_indices_count > 0)[0]\n    required_input_chunks = input_chunk_locations[required_input_chunk_indices]\n    coord_rc_count = chunk_indices_count[required_input_chunk_indices]\n\n    # inverse mapping: input chunks to coordinates\n    required_input_chunk_coords_indices = \\\n        [np.where(coords_input_chunk_locations_linear == irc)[0]\n            for irc in required_input_chunk_indices]\n\n    # STEP 2: for each input chunk that has been associated to at least\n    # one coordinate, calculate the minimal slice required to map all\n    # coordinates that are associated to it (note that resulting slice\n    # coordinates can lie outside of the coordinate's chunk)\n\n    # determine the slices of the input array that are required for\n    # mapping all coordinates associated to a given input chunk.\n    # Note that this slice can be larger than the given chunk when coords\n    # lie at chunk borders.\n    # (probably there's a more efficient way to do this)\n    input_slices_lower = np.array([np.clip(\n            ndimage_labeled_comprehension(\n                np.floor(coordinates[dim] - order // 2),\n                coords_input_chunk_locations_linear,\n                required_input_chunk_indices,\n                np.min, np.int64, 0),\n            0, input.shape[dim] - 1)\n        for dim in range(input.ndim)])\n\n    input_slices_upper = np.array([np.clip(\n            ndimage_labeled_comprehension(\n                np.ceil(coordinates[dim] + order // 2) + 1,\n                coords_input_chunk_locations_linear,\n         
       required_input_chunk_indices,\n                np.max, np.int64, 0),\n            0, input.shape[dim])\n        for dim in range(input.ndim)])\n\n    input_slices = np.array([input_slices_lower, input_slices_upper])\\\n        .swapaxes(1, 2)\n\n    # STEP 3: For each previously obtained slice and its associated\n    # coordinates, define a dask task and apply ndimage.map_coordinates\n\n    # prepare building dask graph\n    # define one task per associated input chunk\n    name = \"map_coordinates_chunk-%s\" % tokenize(\n        input,\n        coordinates,\n        order,\n        mode,\n        cval,\n        prefilter\n        )\n\n    keys = [(name, i) for i in range(len(required_input_chunks))]\n\n    # pair map_coordinates calls with input slices and mapped coordinates\n    values = []\n    for irc in range(len(required_input_chunks)):\n\n        ric_slice = [slice(\n            input_slices[0][irc][dim],\n            input_slices[1][irc][dim])\n            for dim in range(input.ndim)]\n        ric_offset = input_slices[0][irc]\n\n        values.append((\n            ndimage_map_coordinates,\n            input[tuple(ric_slice)],\n            coordinates[:, required_input_chunk_coords_indices[irc]]\n            - ric_offset[:, None],\n            None,\n            order,\n            mode,\n            cval,\n            prefilter\n        ))\n\n    # build dask graph\n    dsk = dict(zip(keys, values))\n    ar = da.Array(dsk, name, tuple([list(coord_rc_count)]), input.dtype)\n\n    # STEP 4: rearrange outputs of ndimage.map_coordinates\n    # to match input order\n    orig_order = np.argsort(\n        [ic for ric_ci in required_input_chunk_coords_indices\n            for ic in ric_ci])\n\n    # compute result and reorder\n    # (ordering first would probably unnecessarily inflate the task graph)\n    return ar.compute()[orig_order]\n\n\ndef map_coordinates(input, coordinates, order=3,\n                    mode='constant', cval=0.0, 
prefilter=False):\n    \"\"\"\n    Wraps ndimage.map_coordinates.\n\n    Both the input and coordinate arrays can be dask arrays.\n    GPU arrays are not supported.\n\n    For each chunk in the coordinates array, the coordinates are computed\n    and mapped onto the required slices of the input array. One task\n    is defined for each input array chunk that has been associated to at\n    least one coordinate. The outputs of the tasks are then rearranged to\n    match the input order. For more details see the docstring of\n    '_map_single_coordinates_array_chunk'.\n\n    Using this function together with schedulers that support\n    parallelism (threads, processes, distributed) makes sense in the\n    case of either a large input array or a large coordinates array.\n    When both arrays are large, it is recommended to use the\n    single-threaded scheduler. A scheduler can be specified using e.g.\n    `with dask.config.set(scheduler='threads'): ...`.\n\n    input : array_like\n        The input array.\n    coordinates : array_like\n        The coordinates at which to sample the input.\n    order : int, optional\n        The order of the spline interpolation, default is 3. The order has to\n        be in the range 0-5.\n    mode : boundary behavior mode, optional\n    cval : float, optional\n        Value to fill past edges of input if mode is 'constant'. Default is 0.0\n    prefilter : bool, optional\n        If True, prefilter the input before interpolation. Default is False.\n        Warning: prefilter is True by default in\n        `scipy.ndimage.map_coordinates`. 
Prefiltering here is performed on a\n        chunk-by-chunk basis, which may lead to different results than\n        `scipy.ndimage.map_coordinates` in case of chunked input arrays\n        and order > 1.\n        Note: prefilter is not necessary when:\n          - You are using nearest neighbour interpolation, by setting order=0\n          - You are using linear interpolation, by setting order=1, or\n          - You have already prefiltered the input array,\n          using the spline_filter or spline_filter1d functions.\n\n    Comments:\n      - in case of a small coordinate array, it might make sense to rechunk\n        it into a single chunk\n      - note the different default for `prefilter` compared to\n        `scipy.ndimage.map_coordinates`, which is True by default.\n    \"\"\"\n    if \"cupy\" in str(get_type(input)) or \"cupy\" in str(get_type(coordinates)):\n        raise NotImplementedError(\n            \"GPU cupy arrays are not supported by \"\n            \"dask_image.ndinterp.map_overlap\")\n\n    # if coordinate array is not a dask array, convert it into one\n    if type(coordinates) is not da.Array:\n        coordinates = da.from_array(coordinates, chunks=coordinates.shape)\n    else:\n        # make sure indices are not split across chunks, i.e. that there's\n        # no chunking along the first dimension\n        if len(coordinates.chunks[0]) > 1:\n            coordinates = da.rechunk(\n                coordinates,\n                (-1,) + coordinates.chunks[1:])\n\n    # if the input array is not a dask array, convert it into one\n    if type(input) is not da.Array:\n        input = da.from_array(input, chunks=input.shape)\n\n    # Map each chunk of the coordinates array onto the entire input array.\n    # 'input' is passed to `_map_single_coordinates_array_chunk` using a bit of\n    # a dirty trick: it is split into its components and passed as a delayed\n    # object, which reconstructs the original array when the task is\n    # executed. 
Therefore two `compute` calls are required to obtain the\n    # final result, one of which is peformed by\n    # `_map_single_coordinates_array_chunk`\n    # Discussion https://dask.discourse.group/t/passing-dask-objects-to-delayed-computations-without-triggering-compute/1441 # noqa: E501\n    output = da.map_blocks(\n        _map_single_coordinates_array_chunk,\n        delayed(da.Array)(input.dask, input.name, input.chunks, input.dtype),\n        coordinates,\n        order=order,\n        mode=mode,\n        cval=cval,\n        prefilter=prefilter,\n        dtype=input.dtype,\n        chunks=coordinates.chunks[1:],\n        drop_axis=0,\n    )\n\n    return output\n"
  },
  {
    "path": "dask_image/ndinterp/_rotate.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport dask.array as da\nimport numpy as np\nfrom scipy.special import sindg, cosdg\n\nfrom ._affine_transform import affine_transform\n\n\ndef rotate(\n        input_arr,\n        angle,\n        axes=(1, 0),\n        reshape=True,\n        output_chunks=None,\n        **kwargs,\n        ):\n    \"\"\"Rotate an array using Dask.\n\n    The array is rotated in the plane defined by the two axes given by the\n    `axes` parameter using spline interpolation of the requested order.\n\n    Chunkwise processing is performed using\n    `dask_image.ndinterp.affine_transform`, for which further parameters\n    supported by the ndimage functions can be passed as keyword arguments.\n\n    Notes\n    -----\n    Differences to `ndimage.rotate`:\n        - currently, prefiltering is not supported\n          (affecting the output in case of interpolation `order > 1`)\n        - default order is 1\n        - modes 'reflect', 'mirror' and 'wrap' are not supported\n\n        Arguments are equal to `ndimage.rotate` except for\n        - `output` (not present here)\n        - `output_chunks` (relevant in the dask array context)\n\n    Parameters\n    ----------\n    input_arr : array_like (Numpy Array, Cupy Array, Dask Array...)\n        The image array.\n    angle : float\n        The rotation angle in degrees.\n    axes : tuple of 2 ints, optional\n        The two axes that define the plane of rotation. Default is the first\n        two axes.\n    reshape : bool, optional\n        If `reshape` is true, the output shape is adapted so that the input\n        array is contained completely in the output. 
Default is True.\n    output_chunks : tuple of ints, optional\n        The shape of the chunks of the output Dask Array.\n    **kwargs : dict, optional\n        Additional keyword arguments are passed to\n        `dask_image.ndinterp.affine_transform`.\n\n    Returns\n    -------\n    rotate : Dask Array\n        A dask array representing the rotated input.\n\n    Examples\n    --------\n    >>> from scipy import ndimage, misc\n    >>> import matplotlib.pyplot as plt\n    >>> import dask.array as da\n    >>> fig = plt.figure(figsize=(10, 3))\n    >>> ax1, ax2, ax3 = fig.subplots(1, 3)\n    >>> img = da.from_array(misc.ascent(),chunks=(64,64))\n    >>> img_45 = dask_image.ndinterp.rotate(img, 45, reshape=False)\n    >>> full_img_45 = dask_image.ndinterp.rotate(img, 45, reshape=True)\n    >>> ax1.imshow(img, cmap='gray')\n    >>> ax1.set_axis_off()\n    >>> ax2.imshow(img_45, cmap='gray')\n    >>> ax2.set_axis_off()\n    >>> ax3.imshow(full_img_45, cmap='gray')\n    >>> ax3.set_axis_off()\n    >>> fig.set_tight_layout(True)\n    >>> plt.show()\n    >>> print(img.shape)\n    (512, 512)\n    >>> print(img_45.shape)\n    (512, 512)\n    >>> print(full_img_45.shape)\n    (724, 724)\n    \"\"\"\n\n    if not isinstance(input_arr, da.core.Array):\n        input_arr = da.from_array(input_arr)\n\n    if output_chunks is None:\n        output_chunks = input_arr.chunksize\n\n    ndim = input_arr.ndim\n\n    if ndim < 2:\n        raise ValueError('input array should be at least 2D')\n\n    axes = list(axes)\n\n    if len(axes) != 2:\n        raise ValueError('axes should contain exactly two values')\n\n    if not all([float(ax).is_integer() for ax in axes]):\n        raise ValueError('axes should contain only integer values')\n\n    if axes[0] < 0:\n        axes[0] += ndim\n    if axes[1] < 0:\n        axes[1] += ndim\n    if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:\n        raise ValueError('invalid rotation plane specified')\n\n    axes.sort()\n\n    
c, s = cosdg(angle), sindg(angle)\n\n    rot_matrix = np.array([[c, s],\n                           [-s, c]])\n\n    img_shape = np.asarray(input_arr.shape)\n    in_plane_shape = img_shape[axes]\n\n    if reshape:\n        # Compute transformed input bounds\n        iy, ix = in_plane_shape\n        in_bounds = np.array([[0, 0, iy, iy],\n                              [0, ix, 0, ix]])\n        out_bounds = rot_matrix @ in_bounds\n        # Compute the shape of the transformed input plane\n        out_plane_shape = (np.ptp(out_bounds, axis=1) + 0.5).astype(int)\n    else:\n        out_plane_shape = img_shape[axes]\n\n    output_shape = np.array(img_shape)\n    output_shape[axes] = out_plane_shape\n    output_shape = tuple(output_shape)\n\n    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)\n    in_center = (in_plane_shape - 1) / 2\n    offset = in_center - out_center\n\n    matrix_nd = np.eye(ndim)\n    offset_nd = np.zeros(ndim)\n\n    for o_x, idx in enumerate(axes):\n\n        matrix_nd[idx, axes[0]] = rot_matrix[o_x, 0]\n        matrix_nd[idx, axes[1]] = rot_matrix[o_x, 1]\n\n        offset_nd[idx] = offset[o_x]\n\n    output = affine_transform(\n        input_arr,\n        matrix=matrix_nd,\n        offset=offset_nd,\n        output_shape=output_shape,\n        output_chunks=output_chunks,\n        **kwargs,\n        )\n\n    return output\n"
  },
  {
    "path": "dask_image/ndinterp/_spline_filters.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport functools\nimport math\n\nimport dask.array as da\nimport numpy as np\nimport scipy\n\nfrom ..dispatch._dispatch_ndinterp import (\n    dispatch_spline_filter,\n    dispatch_spline_filter1d,\n)\nfrom ..ndfilters._utils import _get_depth_boundary, _update_wrapper\n\n\n# magnitude of the maximum filter pole for each order\n# (obtained from scipy/ndimage/src/ni_splines.c)\n_maximum_pole = {\n    2: 0.171572875253809902396622551580603843,\n    3: 0.267949192431122706472553658494127633,\n    4: 0.361341225900220177092212841325675255,\n    5: 0.430575347099973791851434783493520110,\n}\n\n\ndef _get_default_depth(order, tol=1e-8):\n    \"\"\"Determine the approximate depth needed for a given tolerance.\n\n    Here depth is chosen as the smallest integer such that ``|p| ** n < tol``\n    where `|p|` is the magnitude of the largest pole in the IIR filter.\n    \"\"\"\n\n    return math.ceil(np.log(tol) / np.log(_maximum_pole[order]))\n\n\n@_update_wrapper(scipy.ndimage.spline_filter)\ndef spline_filter(\n        image,\n        order=3,\n        output=np.float64,\n        mode='mirror',\n        output_chunks=None,\n        *,\n        depth=None,\n        **kwargs\n):\n\n    if not isinstance(image, da.core.Array):\n        image = da.from_array(image)\n\n    # use dispatching mechanism to determine backend\n    spline_filter_method = dispatch_spline_filter(image)\n\n    try:\n        dtype = np.dtype(output)\n    except TypeError:     # pragma: no cover\n        raise TypeError(  # pragma: no cover\n            \"Could not coerce the provided output to a dtype. \"\n            \"Passing array to output is not currently supported.\"\n        )\n\n    if depth is None:\n        depth = _get_default_depth(order)\n\n    if mode == 'wrap':\n        raise NotImplementedError(\n            \"mode='wrap' is unsupported. 
It is recommended to use 'grid-wrap' \"\n            \"instead.\"\n        )\n\n    # Note: depths of 12 and 24 give results matching SciPy to approximately\n    #       single and double precision accuracy, respectively.\n    boundary = \"periodic\" if mode == 'grid-wrap' else \"none\"\n    depth, boundary = _get_depth_boundary(image.ndim, depth, boundary)\n\n    # cannot pass a func kwarg named \"output\" to map_overlap\n    spline_filter_method = functools.partial(spline_filter_method,\n                                             output=dtype)\n\n    result = image.map_overlap(\n        spline_filter_method,\n        depth=depth,\n        boundary=boundary,\n        dtype=dtype,\n        meta=image._meta,\n        # spline_filter kwargs\n        order=order,\n        mode=mode,\n    )\n\n    return result\n\n\n@_update_wrapper(scipy.ndimage.spline_filter1d)\ndef spline_filter1d(\n        image,\n        order=3,\n        axis=-1,\n        output=np.float64,\n        mode='mirror',\n        output_chunks=None,\n        *,\n        depth=None,\n        **kwargs\n):\n\n    if not isinstance(image, da.core.Array):\n        image = da.from_array(image)\n\n    # use dispatching mechanism to determine backend\n    spline_filter1d_method = dispatch_spline_filter1d(image)\n\n    try:\n        dtype = np.dtype(output)\n    except TypeError:     # pragma: no cover\n        raise TypeError(  # pragma: no cover\n            \"Could not coerce the provided output to a dtype. \"\n            \"Passing array to output is not currently supported.\"\n        )\n\n    if depth is None:\n        depth = _get_default_depth(order)\n\n    # use depth 0 on all axes except the filtered axis\n    if not np.isscalar(depth):\n        raise ValueError(\"depth must be a scalar value\")\n    depths = [0] * image.ndim\n    depths[axis] = depth\n\n    if mode == 'wrap':\n        raise NotImplementedError(\n            \"mode='wrap' is unsupported. 
It is recommended to use 'grid-wrap' \"\n            \"instead.\"\n        )\n\n    # cannot pass a func kwarg named \"output\" to map_overlap\n    spline_filter1d_method = functools.partial(spline_filter1d_method,\n                                               output=dtype)\n\n    result = image.map_overlap(\n        spline_filter1d_method,\n        depth=tuple(depths),\n        boundary=\"periodic\" if mode == 'grid-wrap' else \"none\",\n        dtype=dtype,\n        meta=image._meta,\n        # spline_filter1d kwargs\n        order=order,\n        axis=axis,\n        mode=mode,\n    )\n\n    return result\n"
  },
  {
    "path": "dask_image/ndmeasure/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport collections\nimport functools\nimport operator\nimport warnings\nfrom dask import compute, delayed\nimport dask.config as dask_config\n\nimport dask.array as da\nimport dask.bag as db\nimport numpy as np\n\nfrom . import _utils\nfrom ._utils import _label\nfrom ._utils._find_objects import (\n    _array_chunk_location,\n    _find_bounding_boxes,\n    _find_objects,\n)\n\n__all__ = [\n    \"area\",\n    \"center_of_mass\",\n    \"extrema\",\n    \"histogram\",\n    \"label\",\n    \"labeled_comprehension\",\n    \"maximum\",\n    \"maximum_position\",\n    \"mean\",\n    \"median\",\n    \"minimum\",\n    \"minimum_position\",\n    \"standard_deviation\",\n    \"sum\",\n    \"sum_labels\",\n    \"variance\",\n]\n\n\ndef area(image, label_image=None, index=None):\n    \"\"\"Find the area of specified subregions in an image.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers.\n        If None (default), returns area of total image dimensions.\n    index : int or sequence of ints, optional\n        Labels to include in output.  
If None (default), all values where\n        non-zero ``label_image`` are used.\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    area : ndarray\n        Area of ``index`` selected regions from ``label_image``.\n\n    Example\n    -------\n    >>> import dask.array as da\n    >>> image = da.random.random((3, 3))\n    >>> label_image = da.from_array(\n        [[1, 1, 0],\n         [1, 0, 3],\n         [0, 7, 0]], chunks=(1, 3))\n\n    >>> # No labels given, returns area of total image dimensions\n    >>> area(image)\n    9\n\n    >>> # Combined area of all non-zero labels\n    >>> area(image, label_image).compute()\n    5\n\n    >>> # Areas of selected labels selected with the ``index`` keyword argument\n    >>> area(image, label_image, index=[0, 1, 2, 3]).compute()\n    array([4, 3, 0, 1], dtype=int64)\n    \"\"\"\n\n    if label_image is None:\n        return da.prod(np.array([i for i in image.shape]))\n\n    else:\n        image, label_image, index = _utils._norm_input_labels_index(\n            image, label_image, index\n        )\n\n        ones = da.ones(\n            label_image.shape, dtype=bool, chunks=label_image.chunks\n        )\n\n        area_lbl = labeled_comprehension(\n            ones, label_image, index, len, int, int(0)\n        )\n\n        return area_lbl\n\n\ndef center_of_mass(image, label_image=None, index=None):\n    \"\"\"\n    Find the center of mass over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  
If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    center_of_mass : ndarray\n        Coordinates of centers-of-mass of ``image`` over the ``index`` selected\n        regions from ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    # SciPy transposes these for some reason.\n    # So we do the same thing here.\n    # This only matters if index is some array.\n    index = index.T\n\n    out_dtype = np.dtype([(\"com\", float, (image.ndim,))])\n    default_1d = np.full((1,), np.nan, dtype=out_dtype)\n\n    func = functools.partial(\n        _utils._center_of_mass, shape=image.shape, dtype=out_dtype\n    )\n    com_lbl = labeled_comprehension(\n        image, label_image, index,\n        func, out_dtype, default_1d[0], pass_positions=True\n    )\n    com_lbl = com_lbl[\"com\"]\n\n    return com_lbl\n\n\ndef extrema(image, label_image=None, index=None):\n    \"\"\"\n    Find the min and max with positions over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  
If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    minimums, maximums, min_positions, max_positions : tuple of ndarrays\n        Values and coordinates of minimums and maximums in each feature.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    out_dtype = np.dtype([\n        (\"min_val\", image.dtype),\n        (\"max_val\", image.dtype),\n        (\"min_pos\", np.dtype(int), image.ndim),\n        (\"max_pos\", np.dtype(int), image.ndim)\n    ])\n    default_1d = np.zeros((1,), dtype=out_dtype)\n\n    func = functools.partial(\n        _utils._extrema, shape=image.shape, dtype=out_dtype\n    )\n    extrema_lbl = labeled_comprehension(\n        image, label_image, index,\n        func, out_dtype, default_1d[0], pass_positions=True\n    )\n    extrema_lbl = collections.OrderedDict([\n        (k, extrema_lbl[k])\n        for k in [\"min_val\", \"max_val\", \"min_pos\", \"max_pos\"]\n    ])\n\n    for pos_key in [\"min_pos\", \"max_pos\"]:\n        pos_nd = extrema_lbl[pos_key]\n\n        if index.ndim == 0:\n            pos_nd = da.squeeze(pos_nd)\n        elif index.ndim > 1:\n            pos_nd = pos_nd.reshape(\n                (int(np.prod(pos_nd.shape[:-1])), pos_nd.shape[-1])\n            )\n\n        extrema_lbl[pos_key] = pos_nd\n\n    result = tuple(extrema_lbl.values())\n\n    return result\n\n\ndef find_objects(label_image):\n    \"\"\"Return bounding box slices for each object labelled by integers.\n\n    Parameters\n    ----------\n    label_image : ndarray\n        Image features noted by integers.\n\n    Returns\n    -------\n    Dask dataframe\n        Each row represents an individual integer label. 
Columns contain the\n        slice information for the object boundaries in each dimension\n        (dimensions are named: 0, 1, ..., nd).\n\n    Notes\n    -----\n    You must have the optional dependencies ``dask[dataframe]`` and\n    ``pandas`` installed to use the ``find_objects`` function. They can\n    be installed together via the ``dataframe`` extras group:\n    ``pip install dask-image[dataframe]``.\n    \"\"\"\n    try:\n        import pandas  # noqa: F401  # used by the private helpers below\n        import dask.dataframe as dd\n    except ImportError as e:\n        raise ImportError(\n            \"dask_image.ndmeasure.find_objects requires the optional \"\n            \"dependencies `dask[dataframe]` and `pandas`. Install them \"\n            \"with `pip install dask-image[dataframe]`.\"\n        ) from e\n\n    if label_image.dtype.char not in np.typecodes['AllInteger']:\n        raise ValueError(\"find_objects only accepts integer dtype arrays\")\n\n    block_iter = zip(\n        np.ndindex(*label_image.numblocks),\n        map(functools.partial(operator.getitem, label_image),\n            da.core.slices_from_chunks(label_image.chunks))\n    )\n\n    arrays = []\n    for block_id, block in block_iter:\n        array_location = _array_chunk_location(block_id, label_image.chunks)\n        arrays.append(delayed(_find_bounding_boxes)(block, array_location))\n\n    bag = db.from_sequence(arrays)\n    result = bag.fold(\n        functools.partial(_find_objects, label_image.ndim), split_every=2\n    ).to_delayed()\n    meta = dd.utils.make_meta([(i, object) for i in range(label_image.ndim)])\n    # avoid the user having to call compute twice on result\n    result = delayed(compute)(result)[0]\n\n    with dask_config.set({'dataframe.convert-string': False}):\n        result = dd.from_delayed(\n            result, meta=meta, prefix=\"find-objects-\", verify_meta=False\n        )\n\n    return result\n\n\ndef histogram(image,\n              min,\n              
max,\n              bins,\n              label_image=None,\n              index=None):\n    \"\"\"\n    Find the histogram over an image at specified subregions.\n\n    Histogram calculates the frequency of values in an array within bins\n    determined by ``min``, ``max``, and ``bins``. The ``label_image`` and\n    ``index`` keywords can limit the scope of the histogram to specified\n    sub-regions within the array.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    min : int\n        Minimum value of range of histogram bins.\n    max : int\n        Maximum value of range of histogram bins.\n    bins : int\n        Number of bins.\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    histogram : ndarray\n        Histogram of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n    min = int(min)\n    max = int(max)\n    bins = int(bins)\n\n    func = functools.partial(_utils._histogram, min=min, max=max, bins=bins)\n    result = labeled_comprehension(\n        image, label_image, index, func, object, None\n    )\n\n    return result\n\n\ndef label(image, structure=None, wrap_axes=None):\n    \"\"\"\n    Label features in an array.\n\n    Parameters\n    ----------\n    image : ndarray\n        An array-like object to be labeled.  
Any non-zero values in ``image``\n        are counted as features and zero values are considered the background.\n    structure : ndarray, optional\n        A structuring element that defines feature connections.\n        ``structure`` must be symmetric.  If no structuring element is\n        provided, one is automatically generated with a squared connectivity\n        equal to one.  That is, for a 2-D ``image`` array, the default\n        structuring element is::\n\n            [[0,1,0],\n             [1,1,1],\n             [0,1,0]]\n\n    wrap_axes : tuple of int, optional\n        Whether labels should be wrapped across array boundaries, and if so\n        which axes.\n        This feature is not present in `ndimage.label`.\n        Examples:\n        - (0,) only wrap across the 0th axis.\n        - (0, 1) wrap across the 0th and 1st axis.\n        - (0, 1, 3)  wrap across 0th, 1st and 3rd axis.\n\n    Returns\n    -------\n    label : ndarray or int\n        An integer ndarray where each unique feature in ``image`` has a unique\n        label in the returned array.\n    num_features : int\n        How many objects were found.\n    \"\"\"\n\n    image = da.asarray(image)\n\n    labeled_blocks = np.empty(image.numblocks, dtype=object)\n\n    # First, label each block independently, incrementing the labels in that\n    # block by the total number of labels from previous blocks. 
This way, each\n    # block's labels are globally unique.\n    block_iter = zip(\n        np.ndindex(*image.numblocks),\n        map(functools.partial(operator.getitem, image),\n            da.core.slices_from_chunks(image.chunks))\n    )\n    index, input_block = next(block_iter)\n    labeled_blocks[index], total = _label.block_ndi_label_delayed(input_block,\n                                                                  structure)\n    for index, input_block in block_iter:\n        labeled_block, n = _label.block_ndi_label_delayed(input_block,\n                                                          structure)\n        block_label_offset = da.where(labeled_block > 0,\n                                      total,\n                                      _label.LABEL_DTYPE.type(0))\n        labeled_block += block_label_offset\n        labeled_blocks[index] = labeled_block\n        total += n\n\n    # Put all the blocks together\n    block_labeled = da.block(labeled_blocks.tolist())\n\n    # Now, build a label connectivity graph that groups labels across blocks.\n    # We use this graph to find connected components and then relabel each\n    # block according to those.\n    label_groups = _label.label_adjacency_graph(\n        block_labeled, structure, total, wrap_axes=wrap_axes\n    )\n    new_labeling = _label.connected_components_delayed(label_groups)\n    relabeled = _label.relabel_blocks(block_labeled, new_labeling)\n    n = da.max(relabeled)\n\n    return (relabeled, n)\n\n\ndef labeled_comprehension(image,\n                          label_image,\n                          index,\n                          func,\n                          out_dtype,\n                          default,\n                          pass_positions=False):\n    \"\"\"\n    Compute a function over an image at specified subregions.\n\n    Roughly equivalent to [func(image[labels == i]) for i in index].\n\n    Sequentially applies an arbitrary function (that works on array_like 
image)\n    to subsets of an n-D image array specified by ``label_image`` and\n    ``index``. The option exists to provide the function with positional\n    parameters as the second argument.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    func : callable\n        Python function to apply to ``label_image`` from ``image``.\n    out_dtype : dtype\n        Dtype to use for ``result``.\n    default : int, float or None\n        Default return value when an element of ``index`` does not exist\n        in ``label_image``.\n    pass_positions : bool, optional\n        If True, pass linear indices to ``func`` as a second argument.\n        Default is False.\n\n    Returns\n    -------\n    result : ndarray\n        Result of applying ``func`` on ``image`` over the ``index`` selected\n        regions from ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    out_dtype = np.dtype(out_dtype)\n    default_1d = np.full((1,), default, dtype=out_dtype)\n\n    pass_positions = bool(pass_positions)\n\n    args = (image,)\n    if pass_positions:\n        positions = _utils._ravel_shape_indices(\n            image.shape, chunks=image.chunks\n        )\n        args = (image, positions)\n\n    result = np.empty(index.shape, dtype=object)\n    for i in np.ndindex(index.shape):\n        lbl_mtch_i = (label_image == index[i])\n        args_lbl_mtch_i = tuple(e[lbl_mtch_i] for e in args)\n        result[i] = _utils._labeled_comprehension_func(\n            func, out_dtype, default_1d, *args_lbl_mtch_i\n        
)\n\n    for i in range(result.ndim - 1, -1, -1):\n        result2 = result[..., 0]\n        for j in np.ndindex(index.shape[:i]):\n            result2[j] = da.stack(result[j].tolist(), axis=0)\n        result = result2\n    result = result[()][..., 0]\n\n    return result\n\n\ndef maximum(image, label_image=None, index=None):\n    \"\"\"\n    Find the maxima over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    maxima : ndarray\n        Maxima of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    return labeled_comprehension(\n        image, label_image, index, np.max, image.dtype, image.dtype.type(0)\n    )\n\n\ndef maximum_position(image, label_image=None, index=None):\n    \"\"\"\n    Find the positions of maxima over an image at specified subregions.\n\n    For each region specified by ``label_image``, the position of the maximum\n    value of ``image`` within the region is returned.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  
If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    maxima_positions : ndarray\n        Maxima positions of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    if index.shape:\n        index = index.flatten()\n\n    out_dtype = np.dtype([(\"pos\", int, (image.ndim,))])\n    default_1d = np.zeros((1,), dtype=out_dtype)\n\n    func = functools.partial(\n        _utils._argmax, shape=image.shape, dtype=out_dtype\n    )\n    max_pos_lbl = labeled_comprehension(\n        image, label_image, index,\n        func, out_dtype, default_1d[0], pass_positions=True\n    )\n    max_pos_lbl = max_pos_lbl[\"pos\"]\n\n    if index.shape == tuple():\n        max_pos_lbl = da.squeeze(max_pos_lbl)\n\n    return max_pos_lbl\n\n\ndef mean(image, label_image=None, index=None):\n    \"\"\"\n    Find the mean over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  
If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    means : ndarray\n        Mean of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    nan = np.float64(np.nan)\n\n    mean_lbl = labeled_comprehension(\n        image, label_image, index, np.mean, np.float64, nan\n    )\n\n    return mean_lbl\n\n\ndef median(image, label_image=None, index=None):\n    \"\"\"\n    Find the median over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    medians : ndarray\n        Median of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    nan = np.float64(np.nan)\n\n    return labeled_comprehension(\n        image, label_image, index, np.median, np.float64, nan\n    )\n\n\ndef minimum(image, label_image=None, index=None):\n    \"\"\"\n    Find the minima over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  
If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    minima : ndarray\n        Minima of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    return labeled_comprehension(\n        image, label_image, index, np.min, image.dtype, image.dtype.type(0)\n    )\n\n\ndef minimum_position(image, label_image=None, index=None):\n    \"\"\"\n    Find the positions of minima over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    minima_positions : ndarray\n        Minima positions of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    if index.shape:\n        index = index.flatten()\n\n    out_dtype = np.dtype([(\"pos\", int, (image.ndim,))])\n    default_1d = np.zeros((1,), dtype=out_dtype)\n\n    func = functools.partial(\n        _utils._argmin, shape=image.shape, dtype=out_dtype\n    )\n    min_pos_lbl = labeled_comprehension(\n        image, label_image, index,\n        func, out_dtype, default_1d[0], pass_positions=True\n    )\n    min_pos_lbl = min_pos_lbl[\"pos\"]\n\n    if index.shape == tuple():\n        min_pos_lbl = da.squeeze(min_pos_lbl)\n\n    return min_pos_lbl\n\n\ndef 
standard_deviation(image, label_image=None, index=None):\n    \"\"\"\n    Find the standard deviation over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    standard_deviation : ndarray\n        Standard deviation of ``image`` over the ``index`` selected regions\n        from ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    nan = np.float64(np.nan)\n\n    std_lbl = labeled_comprehension(\n        image, label_image, index, np.std, np.float64, nan\n    )\n\n    return std_lbl\n\n\ndef sum_labels(image, label_image=None, index=None):\n    \"\"\"\n    Find the sum of all pixels over specified subregions of an image.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  
If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    sum_lbl : ndarray\n        Sum of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    sum_lbl = labeled_comprehension(\n        image, label_image, index, np.sum, np.float64, np.float64(0)\n    )\n\n    return sum_lbl\n\n\ndef sum(image, label_image=None, index=None):\n    \"\"\"DEPRECATED FUNCTION. Use `sum_labels` instead.\"\"\"\n    warnings.warn(\"DEPRECATED FUNCTION. Use `sum_labels` instead.\",\n                  DeprecationWarning)\n    return sum_labels(image, label_image=label_image, index=index)\n\n\ndef variance(image, label_image=None, index=None):\n    \"\"\"\n    Find the variance over an image at specified subregions.\n\n    Parameters\n    ----------\n    image : ndarray\n        N-D image data\n    label_image : ndarray, optional\n        Image features noted by integers. If None (default), all values.\n    index : int or sequence of ints, optional\n        Labels to include in output.  If None (default), all values where\n        non-zero ``label_image`` are used.\n\n        The ``index`` argument only works when ``label_image`` is specified.\n\n    Returns\n    -------\n    variance : ndarray\n        Variance of ``image`` over the ``index`` selected regions from\n        ``label_image``.\n    \"\"\"\n\n    image, label_image, index = _utils._norm_input_labels_index(\n        image, label_image, index\n    )\n\n    nan = np.float64(np.nan)\n\n    var_lbl = labeled_comprehension(\n        image, label_image, index, np.var, np.float64, nan\n    )\n\n    return var_lbl\n"
  },
  {
    "path": "dask_image/ndmeasure/_utils/__init__.py",
    "content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport dask\nimport dask.array as da\nimport numpy as np\n\n\ndef _norm_input_labels_index(image, label_image=None, index=None):\n    \"\"\"\n    Normalize arguments to a standard form.\n    \"\"\"\n\n    image = da.asarray(image)\n\n    if label_image is None:\n        label_image = da.ones(\n            image.shape, dtype=int, chunks=image.chunks,\n        )\n        index = da.from_array(np.array(1, dtype=int))\n    elif index is None:\n        label_image = (label_image > 0).astype(int)\n        index = da.from_array(np.array(1, dtype=int))\n\n    label_image = da.asarray(label_image)\n    index = da.asarray(index)\n\n    if index.ndim > 1:\n        warnings.warn(\n            \"Having index with dimensionality greater than 1 is undefined.\",\n            FutureWarning\n        )\n\n    if image.shape != label_image.shape:\n        raise ValueError(\n            \"The image and label_image arrays must be the same shape.\"\n        )\n\n    return (image, label_image, index)\n\n\ndef _ravel_shape_indices_kernel(*args):\n    args2 = tuple(\n        a[i * (None,) + (slice(None),) + (len(args) - i - 1) * (None,)]\n        for i, a in enumerate(args)\n    )\n    return sum(args2)\n\n\ndef _ravel_shape_indices(dimensions, dtype=int, chunks=None):\n    \"\"\"\n    Gets the raveled indices shaped like input.\n    \"\"\"\n\n    indices = [\n        da.arange(\n            0,\n            np.prod(dimensions[i:], dtype=dtype),\n            np.prod(dimensions[i + 1:], dtype=dtype),\n            dtype=dtype,\n            chunks=c\n        )\n        for i, c in enumerate(chunks)\n    ]\n\n    indices = da.blockwise(\n        _ravel_shape_indices_kernel, tuple(range(len(indices))),\n        *sum([(a, (i,)) for i, a in enumerate(indices)], tuple()),\n        dtype=dtype\n    )\n\n    return indices\n\n\ndef _argmax(a, positions, shape, dtype):\n    \"\"\"\n    Find original array position corresponding to the 
maximum.\n    \"\"\"\n\n    result = np.empty((1,), dtype=dtype)\n\n    pos_nd = np.unravel_index(positions[np.argmax(a)], shape)\n    for i, pos_nd_i in enumerate(pos_nd):\n        result[\"pos\"][0, i] = pos_nd_i\n\n    return result[0]\n\n\ndef _argmin(a, positions, shape, dtype):\n    \"\"\"\n    Find original array position corresponding to the minimum.\n    \"\"\"\n\n    result = np.empty((1,), dtype=dtype)\n\n    pos_nd = np.unravel_index(positions[np.argmin(a)], shape)\n    for i, pos_nd_i in enumerate(pos_nd):\n        result[\"pos\"][0, i] = pos_nd_i\n\n    return result[0]\n\n\ndef _center_of_mass(a, positions, shape, dtype):\n    \"\"\"\n    Find the center of mass for each ROI.\n    \"\"\"\n\n    result = np.empty((1,), dtype=dtype)\n\n    positions_nd = np.unravel_index(positions, shape)\n    a_sum = np.sum(a)\n\n    a_wt_i = np.empty(a.shape)\n    for i, pos_nd_i in enumerate(positions_nd):\n        a_wt_sum_i = np.multiply(a, pos_nd_i, out=a_wt_i).sum()\n        result[\"com\"][0, i] = a_wt_sum_i / a_sum\n\n    return result[0]\n\n\ndef _extrema(a, positions, shape, dtype):\n    \"\"\"\n    Find minimum and maximum as well as positions for both.\n    \"\"\"\n\n    result = np.empty((1,), dtype=dtype)\n\n    int_min_pos = np.argmin(a)\n    int_max_pos = np.argmax(a)\n\n    result[\"min_val\"] = a[int_min_pos]\n    result[\"max_val\"] = a[int_max_pos]\n\n    min_pos_nd = np.unravel_index(positions[int_min_pos], shape)\n    max_pos_nd = np.unravel_index(positions[int_max_pos], shape)\n    for i in range(len(shape)):\n        result[\"min_pos\"][0, i] = min_pos_nd[i]\n        result[\"max_pos\"][0, i] = max_pos_nd[i]\n\n    return result[0]\n\n\ndef _histogram(image,\n               min,\n               max,\n               bins):\n    \"\"\"\n    Delayed wrapping of NumPy's histogram\n\n    Also reformats the arguments.\n    \"\"\"\n\n    return np.histogram(image, bins, (min, max))[0]\n\n\n@dask.delayed\ndef _labeled_comprehension_delayed(func,\n      
                             out_dtype,\n                                   default,\n                                   a,\n                                   positions=None):\n    \"\"\"\n    Wrapped delayed labeled comprehension function\n\n    Included in the module for pickling purposes. Also handle cases where\n    computation should not occur.\n    \"\"\"\n\n    result = np.empty((1,), dtype=out_dtype)\n\n    if a.size:\n        if positions is None:\n            result[0] = func(a)\n        else:\n            result[0] = func(a, positions)\n    else:\n        result[0] = default[0]\n\n    return result\n\n\ndef _labeled_comprehension_func(func,\n                                out_dtype,\n                                default,\n                                a,\n                                positions=None):\n    \"\"\"\n    Wrapped labeled comprehension function\n\n    Ensures the result is a proper Dask Array and the computation delayed.\n    \"\"\"\n\n    return da.from_delayed(\n        _labeled_comprehension_delayed(func, out_dtype, default, a, positions),\n        (1,),\n        out_dtype\n    )\n"
  },
  {
    "path": "dask_image/ndmeasure/_utils/_find_objects.py",
    "content": "import numpy as np\nfrom dask.delayed import Delayed\nimport dask.config as dask_config\n\n\ndef _array_chunk_location(block_id, chunks):\n    \"\"\"Pixel coordinate of top left corner of the array chunk.\"\"\"\n    array_location = []\n    for idx, chunk in zip(block_id, chunks):\n        array_location.append(sum(chunk[:idx]))\n    return tuple(array_location)\n\n\ndef _find_bounding_boxes(x, array_location):\n    \"\"\"An alternative to scipy.ndimage.find_objects.\n\n    We use this alternative because scipy.ndimage.find_objects\n    returns a tuple of length N, where N is the largest integer label.\n    This is not ideal for distributed labels, where there might be only\n    one or two objects in an image chunk labelled with very large integers.\n\n    This alternative function returns a pandas dataframe,\n    with one row per object found in the image chunk.\n    \"\"\"\n    import pandas as pd\n\n    unique_vals = np.unique(x)\n    unique_vals = unique_vals[unique_vals != 0]\n    result = {}\n    for val in unique_vals:\n        positions = np.where(x == val)\n        slices = tuple(\n            slice(\n                np.min(pos) + array_location[i],\n                np.max(pos) + 1 + array_location[i]\n            )\n            for i, pos in enumerate(positions)\n        )\n        result[val] = slices\n    column_names = [i for i in range(x.ndim)]  # column names are: 0, 1, ... 
nD\n    return pd.DataFrame.from_dict(result, orient='index', columns=column_names)\n\n\ndef _combine_slices(slices):\n    \"Return the union of all slices.\"\n    if len(slices) == 1:\n        return slices[0]\n    else:\n        start = min([sl.start for sl in slices])\n        stop = max([sl.stop for sl in slices])\n        return slice(start, stop)\n\n\ndef _merge_bounding_boxes(x, ndim):\n    \"Merge the bounding boxes describing objects over multiple image chunks.\"\n    import pandas as pd\n\n    x = x.dropna()\n    data = {}\n    # For each dimension in the array,\n    # pick out the slice values belonging to that dimension\n    # and combine the slices\n    # (i.e. find the union; the slice expanded to all input slices).\n    for i in range(ndim):\n        # Array dimensions are labelled by a number followed by an underscore\n        # i.e. column labels are: 0_x, 1_x, 2_x, ... 0_y, 1_y, 2_y, ...\n        # (x and y represent the pair of chunks label slices are merged from)\n        slices = [x[ii] for ii in x.index if str(ii).startswith(str(i))]\n        combined_slices = _combine_slices(slices)\n        data[i] = combined_slices\n    result = pd.Series(data=data, index=[i for i in range(ndim)], name=x.name)\n    return result\n\n\ndef _find_objects(ndim, df1, df2):\n    \"\"\"Main utility function for find_objects.\"\"\"\n    import pandas as pd\n    import dask.dataframe as dd\n\n    meta = dd.utils.make_meta([(i, object) for i in range(ndim)])\n    if isinstance(df1, Delayed):\n        with dask_config.set({'dataframe.convert-string': False}):\n            df1 = dd.from_delayed(df1, meta=meta)\n    if isinstance(df2, Delayed):\n        with dask_config.set({'dataframe.convert-string': False}):\n            df2 = dd.from_delayed(df2, meta=meta)\n\n    if len(df1) > 0 and len(df2) > 0:\n        ddf = dd.merge(\n            df1, df2,\n            how=\"outer\", left_index=True, right_index=True)\n    elif 
len(df2) > 0:\n        ddf = df2\n    else:\n        ddf = pd.DataFrame()\n\n    result = ddf.apply(_merge_bounding_boxes, ndim=ndim, axis=1, meta=meta)\n    return result\n"
  },
  {
    "path": "dask_image/ndmeasure/_utils/_label.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport operator\n\nimport dask\nimport dask.array as da\nimport numpy as np\nimport scipy.ndimage\nimport scipy.sparse\nimport scipy.sparse.csgraph\n\n\ndef _get_ndimage_label_dtype():\n    return scipy.ndimage.label([1, 0, 1])[0].dtype\n\n\nLABEL_DTYPE = _get_ndimage_label_dtype()\n\n\ndef _get_connected_components_dtype():\n    a = np.empty((0, 0), dtype=int)\n    return scipy.sparse.csgraph.connected_components(a)[1].dtype\n\n\nCONN_COMP_DTYPE = _get_connected_components_dtype()\n\n\ndef relabel_blocks(block_labeled, new_labeling):\n    \"\"\"\n    Relabel a block-labeled array based on ``new_labeling``.\n\n    Parameters\n    ----------\n    block_labeled : array of int\n        The input label array.\n    new_labeling : 1D array of int\n        A new labeling, such that ``labeling[i] = j`` implies that\n        any element in ``array`` valued ``i`` should be relabeled to ``j``.\n\n    Returns\n    -------\n    relabeled : array of int, same shape as ``array``\n        The relabeled input array.\n    \"\"\"\n    new_labeling = new_labeling.astype(LABEL_DTYPE)\n    relabeled = da.map_blocks(operator.getitem,\n                              new_labeling,\n                              block_labeled,\n                              dtype=LABEL_DTYPE,\n                              chunks=block_labeled.chunks)\n    return relabeled\n\n\ndef _unique_axis(a, axis=0):\n    \"\"\"Find unique subarrays in axis in N-D array.\"\"\"\n    at = np.ascontiguousarray(a.swapaxes(0, axis))\n    dt = np.dtype([(\"values\", at.dtype, at.shape[1:])])\n    atv = at.view(dt)\n    r = np.unique(atv)[\"values\"].swapaxes(0, axis)\n    return r\n\n\ndef _across_block_label_grouping(face, structure):\n    \"\"\"\n    Find a grouping of labels across block faces.\n\n    We assume that the labels on either side of the block face are unique to\n    that block. 
This is enforced elsewhere.\n\n    Parameters\n    ----------\n    face : array-like\n        This is the boundary, of thickness (2,), between two blocks.\n    structure : array-like\n        Structuring element for the labeling of the face. This should have\n        length 3 along each axis and have the same number of dimensions as\n        ``face``.\n\n    Returns\n    -------\n    grouped : array of int, shape (2, M)\n        If a column of ``grouped`` contains the values ``i`` and ``j``, it\n        implies that labels ``i`` and ``j`` belong in the same group. These\n        are edges in a global label connectivity graph.\n\n    Examples\n    --------\n    >>> face = np.array([[1, 1, 0, 2, 2, 0, 8],\n    ...                     [0, 7, 7, 7, 7, 0, 9]])\n    >>> structure = np.ones((3, 3), dtype=bool)\n    >>> _across_block_label_grouping(face, structure)\n    array([[1, 2, 8],\n           [2, 7, 9]], dtype=int32)\n\n    This shows that 1-2 are connected, 2-7 are connected, and 8-9 are\n    connected. 
The resulting graph is (1-2-7), (8-9).\n    \"\"\"\n    common_labels = scipy.ndimage.label(face, structure)[0]\n    matching = np.stack((common_labels.ravel(), face.ravel()), axis=1)\n    unique_matching = _unique_axis(matching)\n    valid = np.all(unique_matching, axis=1)\n    unique_valid_matching = unique_matching[valid]\n    common_labels, labels = unique_valid_matching.T\n    in_group = np.flatnonzero(np.diff(common_labels) == 0)\n    i = np.take(labels, in_group)\n    j = np.take(labels, in_group + 1)\n    grouped = np.stack((i, j), axis=0)\n    return grouped\n\n\ndef _across_block_label_grouping_delayed(face, structure):\n    \"\"\"Delayed version of :func:`_across_block_label_grouping`.\"\"\"\n    _across_block_label_grouping_ = dask.delayed(_across_block_label_grouping)\n    grouped = _across_block_label_grouping_(face, structure)\n    return da.from_delayed(grouped, shape=(2, np.nan), dtype=LABEL_DTYPE)\n\n\n@dask.delayed\ndef _to_csr_matrix(i, j, n):\n    \"\"\"Using i and j as coo-format coordinates, return csr matrix.\"\"\"\n    v = np.ones_like(i)\n    mat = scipy.sparse.coo_matrix((v, (i, j)), shape=(n, n))\n    return mat.tocsr()\n\n\ndef label_adjacency_graph(labels, structure, nlabels, wrap_axes=None):\n    \"\"\"\n    Adjacency graph of labels between chunks of ``labels``.\n\n    Each chunk in ``labels`` has been labeled independently, and the labels\n    in different chunks are guaranteed to be unique.\n\n    Here we construct a graph connecting labels in different chunks that\n    correspond to the same logical label in the global volume. 
This is true\n    if the two labels \"touch\" across the block face as defined by the input\n    ``structure``.\n\n    Parameters\n    ----------\n    labels : dask array of int\n        The input labeled array, where each chunk is independently labeled.\n    structure : array of bool\n        Structuring element, shape (3,) * labels.ndim.\n    nlabels : delayed int\n        The total number of labels in ``labels`` *before* correcting for\n        global consistency.\n    wrap_axes : tuple of int, optional\n        Should labels be wrapped across array boundaries, and if so which axes.\n        - (0,) only wrap over the 0th axis.\n        - (0, 1) wrap over the 0th and 1st axis.\n        - (0, 1, 3)  wrap over 0th, 1st and 3rd axis.\n\n    Returns\n    -------\n    mat : delayed scipy.sparse.csr_matrix\n        This matrix has value 1 at (i, j) if label i is connected to\n        label j in the global volume, 0 everywhere else.\n    \"\"\"\n\n    if structure is None:\n        structure = scipy.ndimage.generate_binary_structure(labels.ndim, 1)\n\n    face_slices = _chunk_faces(\n        labels.chunks, labels.shape, structure, wrap_axes=wrap_axes\n    )\n    all_mappings = [da.empty((2, 0), dtype=LABEL_DTYPE, chunks=1)]\n\n    for face_slice in face_slices:\n        face = labels[face_slice]\n        mapped = _across_block_label_grouping_delayed(face, structure)\n        all_mappings.append(mapped)\n\n    all_mappings = da.concatenate(all_mappings, axis=1)\n    i, j = all_mappings\n    mat = _to_csr_matrix(i, j, nlabels + 1)\n\n    return mat\n\n\ndef _chunk_faces(chunks, shape, structure, wrap_axes=None):\n    \"\"\"\n    Return slices for two-pixel-wide boundaries between chunks.\n\n    Parameters\n    ----------\n    chunks : tuple of tuple of int\n        The chunk specification of the array.\n    shape : tuple of int\n        The shape of the array.\n    structure: array of bool\n        Structuring element, shape (3,) * ndim.\n    wrap_axes : tuple of int, 
optional\n        Should labels be wrapped across array boundaries, and if so which axes.\n        - (0,) only wrap over the 0th axis.\n        - (0, 1) wrap over the 0th and 1st axis.\n        - (0, 1, 3)  wrap over 0th, 1st and 3rd axis.\n\n    Yields\n    -------\n    tuple of slices\n        Each element indexes a face between two chunks.\n\n    Examples\n    --------\n    >>> import dask.array as da\n    >>> import scipy.ndimage as ndi\n    >>> a = da.arange(110, chunks=110).reshape((10, 11)).rechunk(5)\n    >>> structure = ndi.generate_binary_structure(2, 1)\n    >>> list(_chunk_faces(a.chunks, a.shape, structure))\n    [(slice(4, 6, None), slice(0, 5, None)),\n     (slice(4, 6, None), slice(5, 10, None)),\n     (slice(4, 6, None), slice(10, 11, None)),\n     (slice(0, 5, None), slice(4, 6, None)),\n     (slice(0, 5, None), slice(9, 11, None)),\n     (slice(5, 10, None), slice(4, 6, None)),\n     (slice(5, 10, None), slice(9, 11, None))]\n    \"\"\"\n\n    ndim = len(shape)\n\n    slices = da.core.slices_from_chunks(chunks)\n\n    # arrange block/chunk indices on grid\n    block_summary = np.arange(len(slices)).reshape(\n        [len(c) for c in chunks])\n\n    # Iterate over all blocks and use the structuring element\n    # to determine which blocks should be connected.\n    # For wrapped axes, we need to consider the block\n    # before the current block with index -1 as well.\n    numblocks = [len(c) if wrap_axes is None or ax not in wrap_axes\n                 else len(c) + 1 for ax, c in enumerate(chunks)]\n    for curr_block in np.ndindex(tuple(numblocks)):\n\n        curr_block = list(curr_block)\n\n        if wrap_axes is not None:\n            # start at -1 indices for wrapped axes\n            for wrap_axis in wrap_axes:\n                curr_block[wrap_axis] = curr_block[wrap_axis] - 1\n\n        # iterate over neighbors of the current block\n        for pos_structure_coord in np.array(np.where(structure)).T:\n\n            # only consider forward 
neighbors\n            if min(pos_structure_coord) < 1 or max(pos_structure_coord) < 2:\n                continue\n\n            neigh_block = [\n                curr_block[dim] + pos_structure_coord[dim] - 1\n                for dim in range(ndim)\n            ]\n\n            if max([neigh_block[dim] >= block_summary.shape[dim]\n                    for dim in range(ndim)]):\n                continue\n\n            # get current slice index\n            ind_curr_block = block_summary[tuple(curr_block)]\n\n            curr_slice = []\n            for dim in range(ndim):\n                # keep slice if not on boundary\n                if neigh_block[dim] == curr_block[dim]:\n                    curr_slice.append(slices[ind_curr_block][dim])\n                # otherwise, add two-pixel-wide boundary\n                else:\n                    if slices[ind_curr_block][dim].stop == shape[dim]:\n                        curr_slice.append(slice(None, None, shape[dim] - 1))\n                    else:\n                        curr_slice.append(slice(\n                            slices[ind_curr_block][dim].stop - 1,\n                            slices[ind_curr_block][dim].stop + 1))\n\n            yield tuple(curr_slice)\n\n\ndef block_ndi_label_delayed(block, structure):\n    \"\"\"\n    Delayed version of ``scipy.ndimage.label``.\n\n    Parameters\n    ----------\n    block : dask array (single chunk)\n        The input array to be labeled.\n    structure : array of bool\n        Structure defining the connectivity of the labeling.\n\n    Returns\n    -------\n    labeled : dask array, same shape as ``block``.\n        The labeled array.\n    n : delayed int\n        The number of labels in ``labeled``.\n    \"\"\"\n    label = dask.delayed(scipy.ndimage.label, nout=2)\n    labeled_block, n = label(block, structure=structure)\n    n = dask.delayed(LABEL_DTYPE.type)(n)\n    labeled = da.from_delayed(labeled_block, shape=block.shape,\n                              
dtype=LABEL_DTYPE)\n    n = da.from_delayed(n, shape=(), dtype=LABEL_DTYPE)\n    return labeled, n\n\n\ndef connected_components_delayed(csr_matrix):\n    \"\"\"\n    Delayed version of scipy.sparse.csgraph.connected_components.\n\n    This version only returns the (delayed) connected component labelling, not\n    the number of components.\n    \"\"\"\n    conn_comp = dask.delayed(scipy.sparse.csgraph.connected_components, nout=2)\n    return da.from_delayed(conn_comp(csr_matrix, directed=False)[1],\n                           shape=(np.nan,), dtype=CONN_COMP_DTYPE)\n"
  },
  {
    "path": "dask_image/ndmorph/__init__.py",
    "content": "# -*- coding: utf-8 -*-\nimport scipy.ndimage\n\nfrom ..dispatch._dispatch_ndmorph import (dispatch_binary_dilation,\n                                          dispatch_binary_erosion)\nfrom . import _ops, _utils\n\n__all__ = [\n    \"binary_closing\",\n    \"binary_dilation\",\n    \"binary_erosion\",\n    \"binary_opening\",\n]\n\n\n@_utils._update_wrapper(scipy.ndimage.binary_closing)\ndef binary_closing(image,\n                   structure=None,\n                   iterations=1,\n                   origin=0,\n                   mask=None,\n                   border_value=0,\n                   brute_force=False):\n    image = (image != 0)\n\n    structure = _utils._get_structure(image, structure)\n    iterations = _utils._get_iterations(iterations)\n    origin = _utils._get_origin(structure.shape, origin)\n\n    kwargs = dict(\n        structure=structure,\n        iterations=iterations,\n        origin=origin,\n        mask=mask,\n        border_value=border_value,\n        brute_force=brute_force\n    )\n\n    result = image\n    result = binary_dilation(result, **kwargs)\n    result = binary_erosion(result, **kwargs)\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.binary_dilation)\ndef binary_dilation(image,\n                    structure=None,\n                    iterations=1,\n                    mask=None,\n                    border_value=0,\n                    origin=0,\n                    brute_force=False):\n    border_value = _utils._get_border_value(border_value)\n\n    result = _ops._binary_op(\n        dispatch_binary_dilation(image),\n        image,\n        structure=structure,\n        iterations=iterations,\n        mask=mask,\n        origin=origin,\n        brute_force=brute_force,\n        border_value=border_value\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.binary_erosion)\ndef binary_erosion(image,\n                   structure=None,\n                   iterations=1,\n          
         mask=None,\n                   border_value=0,\n                   origin=0,\n                   brute_force=False):\n    border_value = _utils._get_border_value(border_value)\n\n    result = _ops._binary_op(\n        dispatch_binary_erosion(image),\n        image,\n        structure=structure,\n        iterations=iterations,\n        mask=mask,\n        origin=origin,\n        brute_force=brute_force,\n        border_value=border_value\n    )\n\n    return result\n\n\n@_utils._update_wrapper(scipy.ndimage.binary_opening)\ndef binary_opening(image,\n                   structure=None,\n                   iterations=1,\n                   origin=0,\n                   mask=None,\n                   border_value=0,\n                   brute_force=False):\n    image = (image != 0)\n\n    structure = _utils._get_structure(image, structure)\n    iterations = _utils._get_iterations(iterations)\n    origin = _utils._get_origin(structure.shape, origin)\n\n    kwargs = dict(\n        structure=structure,\n        iterations=iterations,\n        origin=origin,\n        mask=mask,\n        border_value=border_value,\n        brute_force=brute_force\n    )\n\n    result = image\n    result = binary_erosion(result, **kwargs)\n    result = binary_dilation(result, **kwargs)\n\n    return result\n"
  },
  {
    "path": "dask_image/ndmorph/_ops.py",
    "content": "# -*- coding: utf-8 -*-\n\n\nimport dask.array as da\n\nfrom . import _utils\n\n\ndef _binary_op(func,\n               image,\n               structure=None,\n               iterations=1,\n               mask=None,\n               origin=0,\n               brute_force=False,\n               **kwargs):\n    image = (image != 0)\n\n    structure = _utils._get_structure(image, structure)\n    iterations = _utils._get_iterations(iterations)\n    mask = _utils._get_mask(image, mask)\n    origin = _utils._get_origin(structure.shape, origin)\n    brute_force = _utils._get_brute_force(brute_force)\n    depth = _utils._get_depth(structure.shape, origin)\n    depth, boundary = _utils._get_depth_boundary(structure.ndim, depth, \"none\")\n\n    result = image\n    for i in range(iterations):\n        iter_result = result.map_overlap(\n            func,\n            depth=depth,\n            boundary=boundary,\n            dtype=bool,\n            meta=image._meta,\n            structure=structure,\n            origin=origin,\n            **kwargs\n        )\n        result = da.where(mask, iter_result, result)\n        result._meta = image._meta.astype(bool)\n\n    return result\n"
  },
  {
    "path": "dask_image/ndmorph/_utils.py",
    "content": "# -*- coding: utf-8 -*-\n\n\nimport numbers\n\nimport dask.array as da\nimport numpy as np\n\nfrom ..dispatch._dispatch_ndmorph import dispatch_binary_structure\nfrom ..ndfilters._utils import (_get_depth, _get_depth_boundary, _get_origin,\n                                _update_wrapper)\n\n_update_wrapper = _update_wrapper\n_get_depth_boundary = _get_depth_boundary\n_get_origin = _get_origin\n_get_depth = _get_depth\n\n\ndef _get_structure(image, structure):\n    # Create square connectivity as default\n    if structure is None:\n        generate_binary_structure = dispatch_binary_structure(image)\n        structure = generate_binary_structure(image.ndim, 1)\n    elif hasattr(structure, 'ndim'):\n        if structure.ndim != image.ndim:\n            raise RuntimeError(\n                \"`structure` must have the same rank as `image`.\"\n            )\n        if not issubclass(structure.dtype.type, np.bool_):\n            structure = (structure != 0)\n    else:\n        raise TypeError(\"`structure` must be an array.\")\n\n    return structure\n\n\ndef _get_iterations(iterations):\n    if not isinstance(iterations, numbers.Integral):\n        raise TypeError(\"`iterations` must be of integral type.\")\n    if iterations < 1:\n        raise NotImplementedError(\n            \"`iterations` must be equal to 1 or greater not less.\"\n        )\n\n    return iterations\n\n\ndef _get_dtype(a):\n    # Get the dtype of a value or an array.\n    # Even handle non-NumPy types.\n    return getattr(a, \"dtype\", np.dtype(type(a)))\n\n\ndef _get_mask(image, mask):\n    if mask is None:\n        mask = True\n\n    mask_type = _get_dtype(mask).type\n    if isinstance(mask, (np.ndarray, da.Array)):\n        if mask.shape != image.shape:\n            raise RuntimeError(\"`mask` must have the same shape as `image`.\")\n        if not issubclass(mask_type, np.bool_):\n            mask = (mask != 0)\n    elif issubclass(mask_type, np.bool_):\n        mask = 
bool(mask)\n    else:\n        raise TypeError(\"`mask` must be a Boolean or an array.\")\n\n    return mask\n\n\ndef _get_border_value(border_value):\n    if not isinstance(border_value, numbers.Integral):\n        raise TypeError(\"`border_value` must be of integral type.\")\n\n    border_value = (border_value != 0)\n\n    return border_value\n\n\ndef _get_brute_force(brute_force):\n    if brute_force is not False:\n        if brute_force is True:\n            raise NotImplementedError(\n                \"`brute_force` other than `False` is not yet supported.\"\n            )\n        else:\n            raise TypeError(\n                \"`brute_force` must be `bool`.\"\n            )\n\n    return brute_force\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nPAPER         =\nBUILDDIR      = _build\n\n# User-friendly check for sphinx-build\nifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)\n$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)\nendif\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n# the i18n builder cannot share the environment and doctrees with the others\nI18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n\n.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext\n\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  html       to make standalone HTML files\"\n\t@echo \"  dirhtml    to make HTML files named index.html in directories\"\n\t@echo \"  singlehtml to make a single large HTML file\"\n\t@echo \"  pickle     to make pickle files\"\n\t@echo \"  json       to make JSON files\"\n\t@echo \"  htmlhelp   to make HTML files and a HTML help project\"\n\t@echo \"  qthelp     to make HTML files and a qthelp project\"\n\t@echo \"  devhelp    to make HTML files and a Devhelp project\"\n\t@echo \"  epub       to make an epub\"\n\t@echo \"  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  latexpdf   to make LaTeX files and run them through pdflatex\"\n\t@echo \"  latexpdfja to make LaTeX files and run them through platex/dvipdfmx\"\n\t@echo \"  text    
   to make text files\"\n\t@echo \"  man        to make manual pages\"\n\t@echo \"  texinfo    to make Texinfo files\"\n\t@echo \"  info       to make Texinfo files and run them through makeinfo\"\n\t@echo \"  gettext    to make PO message catalogs\"\n\t@echo \"  changes    to make an overview of all changed/added/deprecated items\"\n\t@echo \"  xml        to make Docutils-native XML files\"\n\t@echo \"  pseudoxml  to make pseudoxml-XML files for display purposes\"\n\t@echo \"  linkcheck  to check all external links for integrity\"\n\t@echo \"  doctest    to run all doctests embedded in the documentation (if enabled)\"\n\nclean:\n\trm -rf $(BUILDDIR)/*\n\nhtml:\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\ndirhtml:\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\nsinglehtml:\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml.\"\n\npickle:\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\njson:\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\nhtmlhelp:\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\nqthelp:\n\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp\n\t@echo\n\t@echo \"Build finished; now you can run \"qcollectiongenerator\" with the\" \\\n\t      \".qhcp project file in $(BUILDDIR)/qthelp, like this:\"\n\t@echo \"# qcollectiongenerator $(BUILDDIR)/qthelp/dask_image.qhcp\"\n\t@echo \"To view the help file:\"\n\t@echo \"# assistant -collectionFile $(BUILDDIR)/qthelp/dask_image.qhc\"\n\ndevhelp:\n\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp\n\t@echo\n\t@echo \"Build finished.\"\n\t@echo \"To view the help file:\"\n\t@echo \"# mkdir -p $$HOME/.local/share/devhelp/dask_image\"\n\t@echo \"# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/dask_image\"\n\t@echo \"# devhelp\"\n\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. 
The epub file is in $(BUILDDIR)/epub.\"\n\nlatex:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make' in that directory to run these through (pdf)latex\" \\\n\t      \"(use \\`make latexpdf' here to do that automatically).\"\n\nlatexpdf:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\nlatexpdfja:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through platex and dvipdfmx...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\ntext:\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. The text files are in $(BUILDDIR)/text.\"\n\nman:\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\ntexinfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo\n\t@echo \"Build finished. The Texinfo files are in $(BUILDDIR)/texinfo.\"\n\t@echo \"Run \\`make' in that directory to run these through makeinfo\" \\\n\t      \"(use \\`make info' here to do that automatically).\"\n\ninfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo \"Running Texinfo files through makeinfo...\"\n\tmake -C $(BUILDDIR)/texinfo info\n\t@echo \"makeinfo finished; the Info files are in $(BUILDDIR)/texinfo.\"\n\ngettext:\n\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale\n\t@echo\n\t@echo \"Build finished. 
The message catalogs are in $(BUILDDIR)/locale.\"\n\nchanges:\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\nlinkcheck:\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\ndoctest:\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/doctest/output.txt.\"\n\nxml:\n\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml\n\t@echo\n\t@echo \"Build finished. The XML files are in $(BUILDDIR)/xml.\"\n\npseudoxml:\n\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml\n\t@echo\n\t@echo \"Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml.\"\n"
  },
  {
    "path": "docs/api.rst",
    "content": "API\n===\n\n.. toctree::\n   :glob:\n\n   dask_image\n"
  },
  {
    "path": "docs/authors.rst",
    "content": ".. include:: ../AUTHORS.rst\n"
  },
  {
    "path": "docs/conf.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# dask-image documentation build configuration file, created by\n# sphinx-quickstart on Tue Jul  9 22:26:36 2013.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\nimport dask_image._version\n\n# Get the project root dir, which is the parent dir of this\ncwd = os.getcwd()\nproject_root = os.path.dirname(cwd)\n\n# Insert the project root dir as the first element in the PYTHONPATH.\n# This lets us ensure that the source package is imported, and that its\n# version is used.\nsys.path.insert(0, project_root)\n\n# -- General configuration ---------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n    'sphinx.ext.autodoc',\n    'sphinx.ext.viewcode',\n    'sphinx.ext.todo',\n    'sphinx.ext.napoleon'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'dask-image'\ncopyright = u\"2018, John Kirkham\"\n\n# The version info for the project you're documenting, acts as replacement\n# for |version| and |release|, also used in various other places throughout\n# the built documents.\n#\n# The full version, including alpha/beta/rc tags.\nrelease = dask_image._version.__version__\n# The short X.Y.Z version.\nversion = '.'.join(release.split('.')[:3])\nif \"dev\" in release:\n    display_version = \"(development version)\"\nelse:\n    display_version = version\n\n# List 
of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n\n# -- Options for HTML output -------------------------------------------\n\n# Set canonical URL from the Read the Docs Domain\nhtml_baseurl = os.environ.get(\"READTHEDOCS_CANONICAL_URL\", \"\")\n\n# Tell Jinja2 templates the build is running on Read the Docs\nif os.environ.get(\"READTHEDOCS\", \"\") == \"True\":\n    if \"html_context\" not in globals():\n        html_context = {}\n    html_context[\"READTHEDOCS\"] = True\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\nhtml_theme = 'dask_sphinx_theme'\n\n# The name for this set of Sphinx documents.  If None, it defaults to\n# \"<project> v<release> documentation\".\nhtml_title = f\"{project} {display_version} documentation\"\n\n# A shorter title for the navigation bar.  Default is the same as\n# html_title.\nhtml_short_title = f\"{project} docs\"\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'dask_imagedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------\n\nlatex_elements = {}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass\n# [howto/manual]).\nlatex_documents = [\n    ('index', 'dask_image.tex',\n     u'dask-image Documentation',\n     u'John Kirkham', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    ('index', 'dask_image',\n     u'dask-image Documentation',\n     [u'John Kirkham'], 1)\n]\n\n\n# -- Options for Texinfo output ----------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    ('index', 'dask_image',\n     u'dask-image Documentation',\n     u'John Kirkham',\n     'dask_image',\n     'One line description of project.',\n     'Miscellaneous'),\n]\n\n\n# Run sphinx-apidoc before building docs.\ndef run_apidoc(_):\n    ignore_paths = [\n        \"../setup.py\",\n        \"../tests\",\n        \"../travis_pypi_setup.py\",\n    ]\n\n    argv = [\n        \"-f\",\n        \"-T\",\n        \"-e\",\n        \"-M\",\n        \"-o\", \".\",\n        \"..\"\n    ] + ignore_paths\n\n    try:\n        # Sphinx 1.7+\n        from sphinx.ext import apidoc\n    except ImportError:\n        # Sphinx 1.6 (and earlier)\n        from sphinx import apidoc\n        argv.insert(0, apidoc.__file__)\n\n    apidoc.main(argv)\n\n\ndef setup(app):\n    app.connect('builder-inited', run_apidoc)\n"
  },
  {
    "path": "docs/contributing.rst",
    "content": ".. include:: ../CONTRIBUTING.rst\n"
  },
  {
    "path": "docs/coverage.rst",
    "content": "*****************\nFunction Coverage\n*****************\n\nCoverage of dask-image vs scipy ndimage functions\n*************************************************\n\nThis table shows which SciPy ndimage functions are supported by dask-image.\n\n.. list-table::\n   :widths: 25 25 25 30\n   :header-rows: 0\n\n   * - Function name\n     - SciPy ndimage\n     - dask-image\n     - dask-image GPU support\n   * - ``affine_transform``\n     - ✓\n     - ✓\n     - ✓\n   * - ``binary_closing``\n     - ✓\n     - ✓\n     - ✓\n   * - ``binary_dilation``\n     - ✓\n     - ✓\n     - ✓\n   * - ``binary_erosion``\n     - ✓\n     - ✓\n     - ✓\n   * - ``binary_fill_holes``\n     - ✓\n     -\n     -\n   * - ``binary_hit_or_miss``\n     - ✓\n     -\n     -\n   * - ``binary_opening``\n     - ✓\n     - ✓\n     - ✓\n   * - ``binary_propagation``\n     - ✓\n     -\n     -\n   * - ``black_tophat``\n     - ✓\n     -\n     -\n   * - ``center_of_mass``\n     - ✓\n     - ✓\n     -\n   * - ``convolve``\n     - ✓\n     - ✓\n     - ✓\n   * - ``convolve1d``\n     - ✓\n     -\n     -\n   * - ``correlate``\n     - ✓\n     - ✓\n     - ✓\n   * - ``correlate1d``\n     - ✓\n     -\n     -\n   * - ``distance_transform_bf``\n     - ✓\n     -\n     -\n   * - ``distance_transform_cdt``\n     - ✓\n     -\n     -\n   * - ``distance_transform_edt``\n     - ✓\n     -\n     -\n   * - ``extrema``\n     - ✓\n     - ✓\n     -\n   * - ``find_objects``\n     - ✓\n     - ✓\n     -\n   * - ``fourier_ellipsoid``\n     - ✓\n     -\n     -\n   * - ``fourier_gaussian``\n     - ✓\n     - ✓\n     -\n   * - ``fourier_shift``\n     - ✓\n     - ✓\n     -\n   * - ``fourier_uniform``\n     - ✓\n     - ✓\n     -\n   * - ``gaussian_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``gaussian_filter1d``\n     - ✓\n     -\n     -\n   * - ``gaussian_gradient_magnitude``\n     - ✓\n     - ✓\n     - ✓\n   * - ``gaussian_laplace``\n     - ✓\n     - ✓\n     - ✓\n   * - ``generate_binary_structure``\n     - ✓\n     -\n     -\n   * 
- ``generic_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``generic_filter1d``\n     - ✓\n     -\n     -\n   * - ``generic_gradient_magnitude``\n     - ✓\n     -\n     -\n   * - ``generic_laplace``\n     - ✓\n     -\n     -\n   * - ``geometric_transform``\n     - ✓\n     -\n     -\n   * - ``grey_closing``\n     - ✓\n     -\n     -\n   * - ``grey_dilation``\n     - ✓\n     -\n     -\n   * - ``grey_erosion``\n     - ✓\n     -\n     -\n   * - ``grey_opening``\n     - ✓\n     -\n     -\n   * - ``histogram``\n     - ✓\n     - ✓\n     -\n   * - ``imread``\n     - ✓\n     - ✓\n     - ✓\n   * - ``iterate_structure``\n     - ✓\n     -\n     -\n   * - ``label``\n     - ✓\n     - ✓\n     -\n   * - ``labeled_comprehension``\n     - ✓\n     - ✓\n     -\n   * - ``laplace``\n     - ✓\n     - ✓\n     - ✓\n   * - ``map_coordinates``\n     - ✓\n     - ✓\n     -\n   * - ``maximum``\n     - ✓\n     - ✓\n     -\n   * - ``maximum_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``maximum_filter1d``\n     - ✓\n     -\n     -\n   * - ``maximum_position``\n     - ✓\n     - ✓\n     -\n   * - ``mean``\n     - ✓\n     - ✓\n     -\n   * - ``median``\n     - ✓\n     - ✓\n     -\n   * - ``median_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``minimum``\n     - ✓\n     - ✓\n     -\n   * - ``minimum_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``minimum_filter1d``\n     - ✓\n     -\n     -\n   * - ``minimum_position``\n     - ✓\n     - ✓\n     -\n   * - ``morphological_gradient``\n     - ✓\n     -\n     -\n   * - ``morphological_laplace``\n     - ✓\n     -\n     -\n   * - ``percentile_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``prewitt``\n     - ✓\n     - ✓\n     - ✓\n   * - ``rank_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``rotate``\n     - ✓\n     - ✓\n     -\n   * - ``shift``\n     - ✓\n     -\n     -\n   * - ``sobel``\n     - ✓\n     - ✓\n     - ✓\n   * - ``spline_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``spline_filter1d``\n     - ✓\n     - ✓\n     - ✓\n   * - 
``standard_deviation``\n     - ✓\n     - ✓\n     -\n   * - ``sum_labels``\n     - ✓\n     - ✓\n     -\n   * - ``threshold_local``\n     - scikit-image function\n     - ✓\n     - ✓\n   * - ``uniform_filter``\n     - ✓\n     - ✓\n     - ✓\n   * - ``uniform_filter1d``\n     - ✓\n     -\n     -\n   * - ``variance``\n     - ✓\n     - ✓\n     -\n   * - ``watershed_ift``\n     - ✓\n     -\n     -\n   * - ``white_tophat``\n     - ✓\n     -\n     -\n   * - ``zoom``\n     - ✓\n     -\n     -\n"
  },
  {
    "path": "docs/history.rst",
    "content": ".. include:: ../HISTORY.rst\n"
  },
  {
    "path": "docs/index.rst",
    "content": "Image processing with Dask Arrays\n=================================\n\nFeatures\n--------\n\n* Support focuses on Dask Arrays.\n* Provides support for loading image files.\n* Implements commonly used N-D filters.\n* Includes a few N-D Fourier filters.\n* Provides some functions for working with N-D label images.\n* Supports a few N-D morphological operators.\n\nContents\n--------\n\n.. toctree::\n   :maxdepth: 1\n\n   installation\n   quickstart\n   coverage\n   api\n   contributing\n   authors\n   history\n\nIndices and tables\n------------------\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n"
  },
  {
    "path": "docs/installation.rst",
    "content": ".. highlight:: shell\n\n============\nInstallation\n============\n\n\nStable release\n--------------\n\nTo install dask-image, run this command in your terminal:\n\n.. code-block:: console\n\n    $ conda install -c conda-forge dask-image\n\nThis is the preferred method to install dask-image, as it will always install\nthe most recent stable release.\n\nIf you don't have `conda`_ installed, we recommend downloading and installing it\nwith the conda-forge distribution `Miniforge`_.\n\nAlternatively, you can install dask-image with pip:\n\n.. code-block:: console\n\n    $ python -m pip install dask-image\n\nIf you don't have `pip`_ installed, this `Python installation guide`_\ncan guide you through the process.\n\n.. _conda: https://conda.io/en/latest/\n.. _Miniforge: https://conda-forge.org/download/\n.. _pip: https://pip.pypa.io\n.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/\n\n\nFrom sources\n------------\n\nThe sources for dask-image can be downloaded from the `Github repo`_.\n\nYou can either clone the public repository:\n\n.. code-block:: console\n\n    $ git clone git://github.com/dask/dask-image\n\nOr download the `tarball`_:\n\n.. code-block:: console\n\n    $ curl  -OL https://github.com/dask/dask-image/tarball/main\n\nOnce you have a copy of the source, you can install it with:\n\n.. code-block:: console\n\n    $ cd dask-image\n    $ python -m pip install .\n\n\n.. _Github repo: https://github.com/dask/dask-image\n.. _tarball: https://github.com/dask/dask-image/tarball/main\n"
  },
  {
    "path": "docs/make.bat",
    "content": "@ECHO OFF\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sphinx-build\r\n)\r\nset BUILDDIR=_build\r\nset ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .\r\nset I18NSPHINXOPTS=%SPHINXOPTS% .\r\nif NOT \"%PAPER%\" == \"\" (\r\n\tset ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%\r\n\tset I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%\r\n)\r\n\r\nif \"%1\" == \"\" goto help\r\n\r\nif \"%1\" == \"help\" (\r\n\t:help\r\n\techo.Please use `make ^<target^>` where ^<target^> is one of\r\n\techo.  html       to make standalone HTML files\r\n\techo.  dirhtml    to make HTML files named index.html in directories\r\n\techo.  singlehtml to make a single large HTML file\r\n\techo.  pickle     to make pickle files\r\n\techo.  json       to make JSON files\r\n\techo.  htmlhelp   to make HTML files and a HTML help project\r\n\techo.  qthelp     to make HTML files and a qthelp project\r\n\techo.  devhelp    to make HTML files and a Devhelp project\r\n\techo.  epub       to make an epub\r\n\techo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\r\n\techo.  text       to make text files\r\n\techo.  man        to make manual pages\r\n\techo.  texinfo    to make Texinfo files\r\n\techo.  gettext    to make PO message catalogs\r\n\techo.  changes    to make an overview over all changed/added/deprecated items\r\n\techo.  xml        to make Docutils-native XML files\r\n\techo.  pseudoxml  to make pseudoxml-XML files for display purposes\r\n\techo.  linkcheck  to check all external links for integrity\r\n\techo.  doctest    to run all doctests embedded in the documentation if enabled\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"clean\" (\r\n\tfor /d %%i in (%BUILDDIR%\\*) do rmdir /q /s %%i\r\n\tdel /q /s %BUILDDIR%\\*\r\n\tgoto end\r\n)\r\n\r\n\r\n%SPHINXBUILD% 2> nul\r\nif errorlevel 9009 (\r\n\techo.\r\n\techo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx\r\n\techo.installed, then set the SPHINXBUILD environment variable to point\r\n\techo.to the full path of the 'sphinx-build' executable. Alternatively you\r\n\techo.may add the Sphinx directory to PATH.\r\n\techo.\r\n\techo.If you don't have Sphinx installed, grab it from\r\n\techo.http://sphinx-doc.org/\r\n\texit /b 1\r\n)\r\n\r\nif \"%1\" == \"html\" (\r\n\t%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The HTML pages are in %BUILDDIR%/html.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"dirhtml\" (\r\n\t%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"singlehtml\" (\r\n\t%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"pickle\" (\r\n\t%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can process the pickle files.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"json\" (\r\n\t%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can process the JSON files.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"htmlhelp\" (\r\n\t%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can run HTML Help Workshop with the ^\r\n.hhp project file in %BUILDDIR%/htmlhelp.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"qthelp\" (\r\n\t%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can run \"qcollectiongenerator\" with the ^\r\n.qhcp project file in 
%BUILDDIR%/qthelp, like this:\r\n\techo.^> qcollectiongenerator %BUILDDIR%\\qthelp\\dask_image.qhcp\r\n\techo.To view the help file:\r\n\techo.^> assistant -collectionFile %BUILDDIR%\\qthelp\\dask_image.ghc\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"devhelp\" (\r\n\t%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"epub\" (\r\n\t%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The epub file is in %BUILDDIR%/epub.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latex\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; the LaTeX files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latexpdf\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tcd %BUILDDIR%/latex\r\n\tmake all-pdf\r\n\tcd %BUILDDIR%/..\r\n\techo.\r\n\techo.Build finished; the PDF files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latexpdfja\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tcd %BUILDDIR%/latex\r\n\tmake all-pdf-ja\r\n\tcd %BUILDDIR%/..\r\n\techo.\r\n\techo.Build finished; the PDF files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"text\" (\r\n\t%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The text files are in %BUILDDIR%/text.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"man\" (\r\n\t%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The manual pages are in %BUILDDIR%/man.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"texinfo\" (\r\n\t%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. 
The Texinfo files are in %BUILDDIR%/texinfo.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"gettext\" (\r\n\t%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The message catalogs are in %BUILDDIR%/locale.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"changes\" (\r\n\t%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.The overview file is in %BUILDDIR%/changes.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"linkcheck\" (\r\n\t%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Link check complete; look for any errors in the above output ^\r\nor in %BUILDDIR%/linkcheck/output.txt.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"doctest\" (\r\n\t%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Testing of doctests in the sources finished, look at the ^\r\nresults in %BUILDDIR%/doctest/output.txt.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"xml\" (\r\n\t%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The XML files are in %BUILDDIR%/xml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"pseudoxml\" (\r\n\t%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.\r\n\tgoto end\r\n)\r\n\r\n:end\r\n"
  },
  {
    "path": "docs/quickstart.rst",
    "content": ".. highlight:: shell\n\n==========\nQuickstart\n==========\n\n\nImporting dask-image\n--------------------\nImport dask image is with an underscore, like this example:\n\n.. code-block:: python\n\n    import dask_image.imread\n    import dask_image.ndfilters\n\n\nDask Examples\n-------------\nWe highly recommend checking out the dask-image-quickstart.ipynb notebook\n(and any other dask-image example notebooks) at the dask-examples repository.\nYou can find the dask-image quickstart notebook in the ``applications`` folder\nof this repository:\n\nhttps://github.com/dask/dask-examples\n\nThe direct link to the notebook file is here:\n\nhttps://github.com/dask/dask-examples/blob/main/applications/image-processing.ipynb\n\nAll the example notebooks are available to launch with\nmybinder and test out interactively.\n\n\nAn Even Quicker Start\n---------------------\n\nYou can read files stored on disk into a dask array\nby passing the filename, or regex matching multiple filenames\ninto ``imread()``.\n\n.. code-block:: python\n\n    filename_pattern = 'path/to/image-*.png'\n    images = dask_image.imread.imread(filename_pattern)\n\nIf your images are parts of a much larger image,\ndask can stack, concatenate or block chunks together:\nhttp://docs.dask.org/en/latest/array-stack.html\n\n\nCalling dask-image functions is also easy.\n\n.. code-block:: python\n\n    import dask_image.ndfilters\n    blurred_image = dask_image.ndfilters.gaussian_filter(images, sigma=10)\n\n\nMany other functions can be applied to dask arrays.\nSee the dask_array_documentation_ for more detail on general array operations.\n\n.. _dask_array_documentation: http://docs.dask.org/en/latest/array.html\n\n.. 
code-block:: python\n\n    result = function_name(images)\n\n\nFurther Reading\n---------------\n\nGood places to start include:\n\n* The dask-image API documentation: http://image.dask.org/en/latest/api.html\n* The documentation on working with dask arrays: http://docs.dask.org/en/latest/array.html\n\n\nTalks and Slides\n----------------\n\nHere are some talks and slides that you can watch to learn dask-image:\n\n- 2020, Genevieve Buckley's talk at PyConAU and SciPy Japan\n\n  - `Watch the talk <https://www.youtube.com/watch?v=MpjgzNeISeI&list=PLs4CJRBY5F1IEFq-wumrBDRCu2EqkpY-R&index=2>`_\n  - `Scipy Japanのトークを見る(プレゼンテーション:英語, 字幕:日本語) <https://www.youtube.com/watch?v=dP0m2iZX0PU>`_ Watch the talk at SciPy Japan (presentation in English, captions in Japanese)\n  - `See the slides <https://genevievebuckley.github.io/dask-image-talk-2020>`_\n\n- 2019, John Kirkham's SciPy talk\n\n  - `Watch the talk here <https://www.youtube.com/watch?v=XGUS174vvLs>`_\n  - `See the slides here <https://nbviewer.ipython.org/format/slides/github/jakirkham/scipy2019/blob/master/slides.ipynb#/>`_\n"
  },
  {
    "path": "docs/release/generate_release_notes.py",
    "content": "\"\"\"Generate the release notes automatically from Github pull requests.\nStart with:\n```\nexport GH_TOKEN=<your-gh-api-token>\n```\nThen, for to include everything from a certain release to main:\n```\npython /path/to/generate_release_notes.py v0.14.0 main --version 0.15.0\n```\nOr two include only things between two releases:\n```\npython /path/to/generate_release_notes.py v.14.2 v0.14.3 --version 0.14.3\n```\nYou should probably redirect the output with:\n```\npython /path/to/generate_release_notes.py [args] | tee release_notes.md\n```\nYou'll require PyGitHub and tqdm, which you can install with:\n```\npip install PyGithub>=1.44.1 twine>=3.1.1 tqdm\n```\nReferences\nhttps://github.com/scikit-image/scikit-image/blob/master/tools/generate_release_notes.py\nhttps://github.com/scikit-image/scikit-image/issues/3404\nhttps://github.com/scikit-image/scikit-image/issues/3405\n\"\"\"\nimport os\nimport argparse\nfrom datetime import datetime\nfrom collections import OrderedDict\nfrom warnings import warn\n\nfrom github import Github\n\ntry:\n    from tqdm import tqdm\nexcept ImportError:\n    warn(\n        'tqdm not installed. This script takes approximately 5 minutes '\n        'to run. To view live progressbars, please install tqdm. '\n        'Otherwise, be patient.'\n    )\n\n    def tqdm(i, **kwargs):\n        return i\n\n\nGH = \"https://github.com\"\nGH_USER = 'dask'\nGH_REPO = 'dask-image'\nGH_TOKEN = os.environ.get('GH_TOKEN')\nif GH_TOKEN is None:\n    raise RuntimeError(\n        \"It is necessary that the environment variable `GH_TOKEN` \"\n        \"be set to avoid running into problems with rate limiting. 
\"\n        \"One can be acquired at https://github.com/settings/tokens.\\n\\n\"\n        \"You do not need to select any permission boxes while generating \"\n        \"the token.\"\n    )\n\ng = Github(GH_TOKEN)\nrepository = g.get_repo(f'{GH_USER}/{GH_REPO}')\n\n\nparser = argparse.ArgumentParser(usage=__doc__)\nparser.add_argument('from_commit', help='The starting tag.')\nparser.add_argument('to_commit', help='The head branch.')\nparser.add_argument(\n    '--version', help=\"Version you're about to release.\", default='0.2.0'\n)\n\nargs = parser.parse_args()\n\nfor tag in repository.get_tags():\n    if tag.name == args.from_commit:\n        previous_tag = tag\n        break\nelse:\n    raise RuntimeError(f'Desired tag ({args.from_commit}) not found')\n\n# For some reason, go get the github commit from the commit to get\n# the correct date\ngithub_commit = previous_tag.commit.commit\nprevious_tag_date = datetime.strptime(\n    github_commit.last_modified, '%a, %d %b %Y %H:%M:%S %Z'\n)\n\n\nall_commits = list(\n    tqdm(\n        repository.get_commits(sha=args.to_commit, since=previous_tag_date),\n        desc=f'Getting all commits between {args.from_commit} '\n        f'and {args.to_commit}',\n    )\n)\nall_hashes = set(c.sha for c in all_commits)\n\n\ndef add_to_users(users, new_user):\n    if new_user.name is None:\n        users[new_user.login] = new_user.login\n    else:\n        users[new_user.login] = new_user.name\n\n\nauthors = set()\ncommitters = set()\nreviewers = set()\nusers = {}\n\nfor commit in tqdm(all_commits, desc=\"Getting commiters and authors\"):\n    if commit.committer is not None:\n        add_to_users(users, commit.committer)\n        committers.add(commit.committer.login)\n    if commit.author is not None:\n        add_to_users(users, commit.author)\n        authors.add(commit.author.login)\n\n# remove these bots.\ncommitters.discard(\"web-flow\")\nauthors.discard(\"azure-pipelines-bot\")\n\nhighlights = 
OrderedDict()\n\nhighlights['Highlights'] = {}\nhighlights['New Features'] = {}\nhighlights['Improvements'] = {}\nhighlights['Bug Fixes'] = {}\nhighlights['API Changes'] = {}\nhighlights['Deprecations'] = {}\nhighlights['Build Tools'] = {}\nother_pull_requests = {}\n\nfor pull in tqdm(\n    g.search_issues(\n        f'repo:{GH_USER}/{GH_REPO} '\n        f'is:pull-request '\n        f'merged:>{previous_tag_date.isoformat()} '\n        'sort:created-asc'\n    ),\n    desc='Pull Requests...',\n):\n    pr = repository.get_pull(pull.number)\n    if pr.merge_commit_sha in all_hashes:\n        summary = pull.title\n        for review in pr.get_reviews():\n            if review.user is not None:\n                add_to_users(users, review.user)\n                reviewers.add(review.user.login)\n        for key, key_dict in highlights.items():\n            pr_title_prefix = (key + ': ').lower()\n            if summary.lower().startswith(pr_title_prefix):\n                key_dict[pull.number] = {\n                    'summary': summary[len(pr_title_prefix):]\n                }\n                break\n        else:\n            other_pull_requests[pull.number] = {'summary': summary}\n\n\n# add Other PRs to the ordered dict to make doc generation easier.\nhighlights['Other Pull Requests'] = other_pull_requests\n\n\n# Now generate the release notes\ntitle = (f'{args.version} ({datetime.today().strftime(\"%Y-%m-%d\")})')\ntitle += '\\n' + '-' * len(title)  # title underline of same length as title\nprint(title)\n\nprint(\n    f\"\"\"\nWe're pleased to announce the release of dask-image {args.version}!\n\"\"\"\n)\n\nfor section, pull_request_dicts in highlights.items():\n    print(f'{section}\\n')\n    if len(pull_request_dicts.items()) == 0:\n        print()\n    for number, pull_request_info in pull_request_dicts.items():\n        print(f'* {pull_request_info[\"summary\"]} (#{number})')\n\n\ncontributors = OrderedDict()\n\ncontributors['authors'] = 
authors\ncontributors['reviewers'] = reviewers\n# ignore committers\n# contributors['committers'] = committers\n\nfor section_name, contributor_set in contributors.items():\n    print()\n    if None in contributor_set:\n        contributor_set.remove(None)\n    committer_str = (\n        f'{len(contributor_set)} {section_name} added to this '\n        'release (alphabetical)'\n    )\n    print(committer_str)\n    print()\n\n    for c in sorted(contributor_set, key=lambda x: users[x].lower()):\n        commit_link = f\"{GH}/{GH_USER}/{GH_REPO}/commits?author={c}\"\n        print(f\"* `{users[c]} <{commit_link}>`_ - @{c}\")\n    print()\n"
  },
  {
    "path": "docs/release/release_guide.rst",
    "content": "=============\nRelease Guide\n=============\n\nThis guide documents the ``dask-image`` release process.\nIt is based on the ``napari`` release guide created by Kira Evans.\n\nThis guide is primarily intended for core developers of `dask-image`.\nThey will need to have a `PyPI <https://pypi.org>`_ account\nwith upload permissions to the ``dask-image`` package.\nThey will also need permissions to merge pull requests\nin the ``dask-image`` conda-forge feedstock repository:\nhttps://github.com/conda-forge/dask-image-feedstock.\n\nYou will also need these additional release dependencies\nto complete the release process:\n\n\n.. code-block:: bash\n\n   pip install \"PyGithub>=1.44.1\" \"twine>=3.1.1\" tqdm\n\n\n\nSet PyPI password as GitHub secret\n----------------------------------\n\nThe `dask/dask-image` repository must have a PyPI API token as a GitHub secret.\n\nThis likely has been done already, but if it has not, follow\n`this guide <https://pypi.org/help/#apitoken>`_ to gain a token and\n`this other guide <https://help.github.com/en/actions/automating-your-workflow-with-github-actions/creating-and-using-encrypted-secrets>`_\nto add it as a secret.\n\n\nDetermining the new version number\n----------------------------------\n\nWe use `calendar versioning (CalVer) <https://calver.org/>`_\nfor `dask-image`. This means version numbers have the format\n`YYYY.MM.X`. Here, YYYY indicates the year, MM indicates the month,\nand X is an integer counter beginning at zero (to distinguish\nbetween cases where multiple releases were made in the same month).\n\n`setuptools-scm <https://setuptools-scm.readthedocs.io/en/stable/>`_\nthen determines the exact version from the latest\n`git tag <https://git-scm.com/book/en/v2/Git-Basics-Tagging>`_\nbeginning with `v`. 
So our git tags will have the format `vYYYY.MM.X`.\n\nSo for example, a git tag \"v2030.01.0\" will be the first release\nmade in the month of January, in the year 2030.\n\nGenerate the release notes\n--------------------------\n\nThe release notes contain a list of merges, contributors, and reviewers.\n\n1. Create a GH_TOKEN environment variable on your computer.\n\n    On Linux/Mac:\n\n    .. code-block:: bash\n\n       export GH_TOKEN=<your-gh-api-token>\n\n    On Windows:\n\n    .. code-block::\n\n       set GH_TOKEN <your-gh-api-token>\n\n\n    If you don't already have a\n    `personal GitHub API token <https://github.blog/2013-05-16-personal-api-tokens/>`_,\n    you can create one from the developer settings of your GitHub account:\n    `<https://github.com/settings/tokens>`_\n\n\n2. Run the python script to generate the release notes,\nincluding all changes since the last tagged release.\n\n    Note: The PyGithub package must be installed to run this script (https://github.com/PyGithub/PyGithub)\n\n    Call the script like this:\n\n    .. code-block:: bash\n\n       python docs/release/generate_release_notes.py  <last-version-tag> main --version <new-version-number>\n\n\n    An example:\n\n    .. code-block:: bash\n\n       python docs/release/generate_release_notes.py  v2021.05.24 main --version 2021.06.03\n\n\n    See help for this script with:\n\n    .. code-block:: bash\n\n       python docs/release/generate_release_notes.py -h\n\n\n3. Scan the PR titles for highlights, deprecations, API changes,\n   and bugfixes, and mention these in the relevant sections of the notes.\n   Try to present the information in an expressive way by mentioning\n   the affected functions, elaborating on the changes and their\n   consequences. If possible, organize semantically close PRs in groups.\n\n4. Copy your edited release notes into the file ``HISTORY.rst``.\n\n5. 
Make and merge a PR with the release notes before moving onto the next steps.\n\n\nCreate the release candidate\n-----------------------------\n\nGo to the dask-image releases page: https://github.com/dask/dask-image/releases\n\nClick the \"Draft Release\" button to create a new release candidate.\n\n- Both the tag version and release title should have the format ``vYYYY.MM.Xrc1``.\n- Copy-paste the release notes from ``HISTORY.rst`` for this release into the\n  description text box.\n- Tick \"Set as a pre-release\"\n\nNote here how we are using ``rc`` for release candidate to create a version\nof our release we can test before making the real release.\n\nCreating the release will trigger a GitHub actions script,\nwhich automatically uploads the release to PyPI.\n\n\nTesting the release candidate\n-----------------------------\n\nThe release candidate can then be tested with\n\n.. code-block:: bash\n\n   pip install --pre dask-image\n\n\nIt is recommended that the release candidate is tested in a virtual environment\nin order to isolate dependencies.\n\nIf the release candidate is not what you want, make your changes and\nrepeat the process from the beginning but\nincrementing the number after ``rc`` (e.g. 
``vYYYY.MM.Xrc2``).\n\nOnce you are satisfied with the release candidate it is time to generate\nthe actual release.\n\nGenerating the actual release\n-----------------------------\n\nTo generate the actual release you will now repeat the processes above\nbut now\n- dropping the ``rc`` suffix from the version number.\n- ticking \"Set as the latest release\"\n\nThis will automatically upload the release to PyPI, and will also\nautomatically begin the process to release the new version on conda-forge.\n\nReleasing on conda-forge\n------------------------\n\nIt usually takes about an hour or so for the conda-forge bot\n``regro-cf-autotick-bot`` to see that there is a new release\navailable on PyPI, and open a pull request in the ``dask-image``\nconda-forge feedstock here: https://github.com/conda-forge/dask-image-feedstock\n\nNote: the conda-forge bot will not open a PR for any of the release candidates,\nonly for the final release. Only one PR is opened for each release.\n\nAs an alternative to waiting for the conda-forge bot to notice the new release,\nyou can submit a new dask-image feedstock issue indicating\n``@conda-forge-admin, please update version`` in the issue title. This will\n`trigger <https://conda-forge.org/docs/maintainer/infrastructure.html#conda-forge-admin-please-update-version>`_\nthe bot to check for new versions.\n\nBefore merging the pull request, first you should check:\n\n* That all the tests have passed on CI for this pull request\n* If any dependencies were changed, and should be updated by\n  committing changes to ``recipe/meta.yaml`` to the pull request\n\nOnce that all looks good you can merge the pull request,\nand the newest version of ``dask-image`` will automatically be made\navailable on conda-forge. We're finished!\n\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"setuptools>=64\", \"setuptools_scm>=8\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"dask-image\"\nauthors = [{name=\"dask-image contributors. see https://github.com/dask/dask-image/graphs/contributors\"}]  # noqa: E501\ndescription = \"Distributed image processing\"\nkeywords = [\"dask-image\", \"dask\", \"image\"]\nreadme = \"README.rst\"\nlicense = { text = \"BSD-3-Clause\" }\ndynamic = [\"version\"]\nrequires-python = \">=3.9\"\nclassifiers = [\n    \"Development Status :: 2 - Pre-Alpha\",\n    \"Intended Audience :: Developers\",\n    \"License :: OSI Approved :: BSD License\",\n    \"Natural Language :: English\",\n    \"Operating System :: OS Independent\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n]\ndependencies = [\n    \"dask[array] >=2024.4.1\",\n    \"numpy >=1.18\",\n    \"scipy >=1.7.0\",\n    \"pims >=0.4.1\",\n    \"tifffile >=2020.10.1\",\n]\n\n[project.optional-dependencies]\ndataframe = [\n    \"dask[dataframe] >=2024.4.1\",\n    \"pandas >=2.0.0\",\n]\ntest = [\n    \"build >=1.2.1\",\n    \"coverage >=7.2.1\",\n    \"flake8 >=6.0.0\",\n    \"Flake8-pyproject\",\n    \"pytest >=7.2.2\",\n    \"pytest-cov >=4.0.0\",\n    \"pytest-flake8 >=1.1.1\",\n    \"pytest-timeout >=2.3.1\",\n    \"twine >=3.1.1\",\n]\ngpu = [\n    \"cupy >=9.0.0\",\n]\n\n[project.urls]\n\"Homepage\" = \"https://image.dask.org\"\n\"Issue Tracker\" = \"https://github.com/dask/dask-image/issues\"\n\"Source Code\" = \"https://github.com/dask/dask-image\"\n\n[tool.setuptools_scm]\nversion_scheme = \"no-guess-dev\"\nversion_file = \"dask_image/_version.py\"\n\n[tool.setuptools]\ninclude-package-data = true\nzip-safe = false\nlicense-files = [\n    \"LICENSE.txt\",\n]\n\n[tool.setuptools.packages.find]\ninclude = [\n 
   \"dask_image*\",\n]\n\n[tool.pytest.ini_options]\naddopts = \"--flake8\"\nmarkers = \"cupy\"\n\n[tool.flake8]\nexclude = [\"dask_image/_version.py\"]\n"
  },
  {
    "path": "tests/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "tests/test_dask_image/test_imread/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "tests/test_dask_image/test_imread/test_core.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numbers\nimport pathlib\n\nimport pytest\n\nimport numpy as np\nimport tifffile\n\nimport dask.array as da\nimport dask_image.imread\n\n\n@pytest.mark.parametrize(\n    \"err_type, nframes\",\n    [\n        (ValueError, 1.0),\n        (ValueError, 0),\n        (ValueError, -2),\n    ]\n)\ndef test_errs_imread(err_type, nframes):\n    with pytest.raises(err_type):\n        dask_image.imread.imread(\"test.tiff\", nframes=nframes)\n\n\n@pytest.mark.parametrize(\n    \"seed\",\n    [\n        0,\n        1,\n    ]\n)\n@pytest.mark.parametrize(\n    \"nframes, shape\",\n    [\n        (1, (1, 4, 3)),\n        (-1, (1, 4, 3)),\n        (3, (1, 4, 3)),\n        (1, (5, 4, 3)),\n        (2, (5, 4, 3)),\n        (1, (10, 5, 4, 3)),\n        (5, (10, 5, 4, 3)),\n        (10, (10, 5, 4, 3)),\n        (-1, (10, 5, 4, 3)),\n    ]\n)\n@pytest.mark.parametrize(\n    \"dtype\",\n    [\n        np.int16,\n        np.int32,\n        np.float32,\n    ]\n)\n@pytest.mark.parametrize(\n    \"is_pathlib_Path\",\n    [\n        True,\n        False,\n    ]\n)\ndef test_tiff_imread(tmpdir, seed, nframes, shape, dtype, is_pathlib_Path):  # noqa: E501\n    np.random.seed(seed)\n\n    dirpth = tmpdir.mkdir(\"test_imread\")\n    dtype = np.dtype(dtype).type\n\n    low, high = 0.0, 1.0\n    if isinstance(dtype, numbers.Integral):\n        low, high = np.iinfo(dtype).min, np.iinfo(dtype).max\n\n    a = np.random.uniform(low=low, high=high, size=shape).astype(dtype)\n\n    fn = str(dirpth.join(\"test.tiff\"))\n    with tifffile.TiffWriter(fn) as fh:\n        for i in range(len(a)):\n            fh.write(a[i], contiguous=True)\n\n    if is_pathlib_Path:\n        fn = pathlib.Path(fn)\n    d = dask_image.imread.imread(fn, nframes=nframes)\n\n    if nframes == -1:\n        nframes = shape[0]\n\n    assert min(nframes, shape[0]) == max(d.chunks[0])\n\n    if shape[0] % nframes == 0:\n        assert nframes == d.chunks[0][-1]\n    
else:\n        assert (shape[0] % nframes) == d.chunks[0][-1]\n\n    da.utils.assert_eq(a, d)\n\n\ndef test_tiff_imread_glob_natural_sort(tmpdir):\n    dirpth = tmpdir.mkdir(\"test_imread\")\n    tifffile.imwrite(dirpth.join(\"10.tif\"), np.array([10]))\n    tifffile.imwrite(dirpth.join(\"9.tif\"), np.array([9]))\n    actual = np.array(dask_image.imread.imread(dirpth.join(\"*.tif\")))\n    assert np.all(actual == np.array([[9], [10]]))\n"
  },
  {
    "path": "tests/test_dask_image/test_imread/test_cupy_imread.py",
    "content": "import numpy as np\nimport tifffile\nimport pytest\n\nimport dask_image.imread\n\ncupy = pytest.importorskip(\"cupy\", minversion=\"6.0.0\")\n\n\n@pytest.mark.cupy\ndef test_cupy_imread(tmp_path):\n    a = np.random.uniform(low=0.0, high=1.0, size=(1, 4, 3)).astype(np.float32)\n\n    fn = str(tmp_path/\"test.tiff\")\n    with tifffile.TiffWriter(fn) as fh:\n        for i in range(len(a)):\n            fh.write(a[i])\n\n    result = dask_image.imread.imread(fn, arraytype=\"cupy\")\n    assert type(result._meta) is cupy.ndarray\n    assert type(result.compute()) == cupy.ndarray\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__conv.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        (dask_image.ndfilters.convolve),\n        (dask_image.ndfilters.correlate),\n    ]\n)\n@pytest.mark.parametrize(\n    \"err_type, weights, origin\",\n    [\n        (ValueError, np.ones((1,)), 0),\n        (ValueError, np.ones((1, 0)), 0),\n        (RuntimeError, np.ones((1, 1)), (0,)),\n        (RuntimeError, np.ones((1, 1)), [(0,)]),\n        (ValueError, np.ones((1, 1)), 1),\n        (TypeError, np.ones((1, 1)), 0.0),\n        (TypeError, np.ones((1, 1)), (0.0, 0.0)),\n        (TypeError, np.ones((1, 1)), 1+0j),\n        (TypeError, np.ones((1, 1)), (0+0j, 1+0j)),\n    ]\n)\ndef test_convolutions_params(da_func,\n                             err_type,\n                             weights,\n                             origin):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    with pytest.raises(err_type):\n        da_func(d,\n                weights,\n                origin=origin)\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.convolve,\n        dask_image.ndfilters.correlate,\n    ]\n)\ndef test_convolutions_shape_type(da_func):\n    weights = np.ones((1, 1))\n\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    assert all([(type(s) is int) for s in d.shape])\n\n    d2 = da_func(d, weights)\n\n    assert all([(type(s) is int) for s in d2.shape])\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.convolve,\n        dask_image.ndfilters.correlate,\n    ]\n)\ndef test_convolutions_comprehensions(da_func):\n    np.random.seed(0)\n\n    a = np.random.random((3, 12, 14))\n    d = da.from_array(a, chunks=(3, 6, 7))\n\n    weights = np.ones((1, 1))\n\n    l2s = [da_func(d[i], 
weights) for i in range(len(d))]\n    l2c = [da_func(d[i], weights)[None] for i in range(len(d))]\n\n    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\n@pytest.mark.parametrize(\n    \"sp_func, da_func\",\n    [\n        (scipy.ndimage.convolve, dask_image.ndfilters.convolve),\n        (scipy.ndimage.correlate, dask_image.ndfilters.correlate),\n    ]\n)\n@pytest.mark.parametrize(\n    \"weights\",\n    [\n        np.ones((1, 1)),\n    ]\n)\ndef test_convolutions_identity(sp_func,\n                               da_func,\n                               weights):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    da.utils.assert_eq(\n        d, da_func(d, weights)\n    )\n\n    da.utils.assert_eq(\n        sp_func(a, weights),\n        da_func(d, weights)\n    )\n\n\n@pytest.mark.parametrize(\n    \"sp_func, da_func\",\n    [\n        (scipy.ndimage.convolve, dask_image.ndfilters.convolve),\n        (scipy.ndimage.correlate, dask_image.ndfilters.correlate),\n    ]\n)\n@pytest.mark.parametrize(\n    \"weights, origin\",\n    [\n        (np.ones((2, 2)), 0),\n        (np.ones((2, 3)), 0),\n        (np.ones((2, 3)), (0, 1)),\n        (np.ones((2, 3)), (0, -1)),\n        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0),\n        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)),\n        ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)),\n        (np.ones((5, 5)), 0),\n        (np.ones((7, 7)), 0),\n        (np.ones((8, 8)), 0),\n        (np.ones((10, 10)), 0),\n        (np.ones((5, 5)), 2),\n        (np.ones((5, 5)), -2),\n    ]\n)\ndef test_convolutions_compare(sp_func,\n                              da_func,\n                              weights,\n                              origin):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    da.utils.assert_eq(\n        sp_func(\n         
   a, weights, origin=origin\n        ),\n        da_func(\n            d, weights, origin=origin\n        )\n    )\n\n\n@pytest.mark.parametrize(\n    \"sp_func, da_func\",\n    [\n        (scipy.ndimage.convolve, dask_image.ndfilters.convolve),\n        (scipy.ndimage.correlate, dask_image.ndfilters.correlate),\n    ]\n)\n@pytest.mark.parametrize(\n    \"weights\",\n    [\n        np.ones((1, 5)),\n        np.ones((5, 1)),\n    ]\n)\n@pytest.mark.parametrize(\n    \"mode\",\n    [\"reflect\", \"wrap\", \"nearest\", \"constant\", \"mirror\"]\n)\ndef test_convolutions_modes(sp_func,\n                            da_func,\n                            weights,\n                            mode):\n    a = np.arange(140).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    da.utils.assert_eq(\n        sp_func(\n            a, weights, mode=mode\n        ),\n        da_func(\n            d, weights, mode=mode\n        )\n    )\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__diff.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\ndef test_laplace_comprehensions():\n    np.random.seed(0)\n\n    a = np.random.random((3, 12, 14))\n    d = da.from_array(a, chunks=(3, 6, 7))\n\n    l2s = [dask_image.ndfilters.laplace(d[i]) for i in range(len(d))]\n    l2c = [dask_image.ndfilters.laplace(d[i])[None] for i in range(len(d))]\n\n    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\ndef test_laplace_compare():\n    s = (10, 11, 12)\n    a = np.arange(float(np.prod(s))).reshape(s)\n    d = da.from_array(a, chunks=(5, 5, 6))\n\n    da.utils.assert_eq(\n        scipy.ndimage.laplace(a), dask_image.ndfilters.laplace(d)\n    )\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__edge.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\n@pytest.mark.parametrize(\n    \"err_type, axis\",\n    [\n        (ValueError, 0.0),\n        (ValueError, 2),\n        (ValueError, -3),\n    ]\n)\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.prewitt,\n        dask_image.ndfilters.sobel,\n    ]\n)\ndef test_edge_func_params(da_func, err_type, axis):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    with pytest.raises(err_type):\n        da_func(d, axis)\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.prewitt,\n        dask_image.ndfilters.sobel,\n    ]\n)\ndef test_edge_comprehensions(da_func):\n    np.random.seed(0)\n\n    a = np.random.random((3, 12, 14))\n    d = da.from_array(a, chunks=(3, 6, 7))\n\n    l2s = [da_func(d[i]) for i in range(len(d))]\n    l2c = [da_func(d[i])[None] for i in range(len(d))]\n\n    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\n@pytest.mark.parametrize(\n    \"axis\",\n    [\n        0,\n        1,\n        2,\n        -1,\n        -2,\n        -3,\n    ]\n)\n@pytest.mark.parametrize(\n    \"da_func, sp_func\",\n    [\n        (dask_image.ndfilters.prewitt, scipy.ndimage.prewitt),\n        (dask_image.ndfilters.sobel, scipy.ndimage.sobel),\n    ]\n)\ndef test_edge_func_compare(da_func, sp_func, axis):\n    s = (10, 11, 12)\n    a = np.arange(float(np.prod(s))).reshape(s)\n    d = da.from_array(a, chunks=(5, 5, 6))\n\n    da.utils.assert_eq(\n        sp_func(a, axis),\n        da_func(d, axis)\n    )\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__gaussian.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\n@pytest.mark.parametrize(\n    \"err_type, sigma, truncate\",\n    [\n        (RuntimeError, [[1.0]], 4.0),\n        (RuntimeError, [1.0], 4.0),\n        (TypeError, 1.0 + 0.0j, 4.0),\n        (TypeError, 1.0, 4.0 + 0.0j),\n    ]\n)\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.gaussian,\n        dask_image.ndfilters.gaussian_filter,\n        dask_image.ndfilters.gaussian_gradient_magnitude,\n        dask_image.ndfilters.gaussian_laplace,\n    ]\n)\ndef test_gaussian_filters_params(da_func, err_type, sigma, truncate):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    with pytest.raises(err_type):\n        da_func(d, sigma, truncate=truncate)\n\n\n@pytest.mark.parametrize(\n    \"sigma, truncate\",\n    [\n        (0.0, 0.0),\n        (0.0, 1.0),\n        (0.0, 4.0),\n        (1.0, 0.0),\n    ]\n)\n@pytest.mark.parametrize(\n    \"order\", [0, 1, 2, 3]\n)\n@pytest.mark.parametrize(\n    \"sp_func, da_func\",\n    [\n        (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian),\n        (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian_filter),\n    ]\n)\ndef test_gaussian_filters_identity(sp_func, da_func, order, sigma, truncate):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    if order % 2 == 1 and sigma != 0 and truncate == 0:\n        pytest.skip(\n            \"SciPy zeros the result of a Gaussian filter with odd derivatives\"\n            \" when sigma is non-zero, truncate is zero, and derivative is odd.\"\n            \"\\n\\nxref: https://github.com/scipy/scipy/issues/7364\"\n        )\n\n    da.utils.assert_eq(\n        d, da_func(d, sigma, order, truncate=truncate)\n    )\n\n    da.utils.assert_eq(\n        sp_func(a, sigma, order, 
truncate=truncate),\n        da_func(d, sigma, order, truncate=truncate)\n    )\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.gaussian,\n        dask_image.ndfilters.gaussian_filter,\n        dask_image.ndfilters.gaussian_gradient_magnitude,\n        dask_image.ndfilters.gaussian_laplace,\n    ]\n)\ndef test_gaussian_filter_shape_type(da_func):\n    sigma = 1.0\n    truncate = 4.0\n\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    assert all([(type(s) is int) for s in d.shape])\n\n    d2 = da_func(d, sigma=sigma, truncate=truncate)\n\n    assert all([(type(s) is int) for s in d2.shape])\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.gaussian,\n        dask_image.ndfilters.gaussian_filter,\n        dask_image.ndfilters.gaussian_gradient_magnitude,\n        dask_image.ndfilters.gaussian_laplace,\n    ]\n)\ndef test_gaussian_filter_comprehensions(da_func):\n    da_wfunc = lambda arr: da_func(arr, 1.0, truncate=4.0)  # noqa: E731\n\n    np.random.seed(0)\n\n    a = np.random.random((3, 12, 14))\n    d = da.from_array(a, chunks=(3, 6, 7))\n\n    l2s = [da_wfunc(d[i]) for i in range(len(d))]\n    l2c = [da_wfunc(d[i])[None] for i in range(len(d))]\n\n    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\n@pytest.mark.parametrize(\n    \"sigma, truncate\",\n    [\n        (1.0, 2.0),\n        (1.0, 4.0),\n        (2.0, 2.0),\n        (2.0, 4.0),\n        ((1.0, 2.0), 4.0),\n    ]\n)\n@pytest.mark.parametrize(\n    \"sp_func, da_func\",\n    [\n        (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian),\n        (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian_filter),\n        (scipy.ndimage.gaussian_gradient_magnitude,\n         dask_image.ndfilters.gaussian_gradient_magnitude),\n        (scipy.ndimage.gaussian_laplace,\n         
dask_image.ndfilters.gaussian_laplace),\n    ]\n)\ndef test_gaussian_filters_compare(sp_func, da_func, sigma, truncate):\n    s = (100, 110)\n    a = np.arange(float(np.prod(s))).reshape(s)\n    d = da.from_array(a, chunks=(50, 55))\n\n    da.utils.assert_eq(\n        sp_func(a, sigma, truncate=truncate),\n        da_func(d, sigma, truncate=truncate)\n    )\n\n\n@pytest.mark.parametrize(\n    \"sigma, truncate\",\n    [\n        (0.0, 0.0),\n        (1.0, 0.0),\n        (0.0, 1.0),\n        (1.0, 2.0),\n        (1.0, 4.0),\n        (2.0, 2.0),\n        (2.0, 4.0),\n        ((1.0, 2.0), 4.0),\n    ]\n)\n@pytest.mark.parametrize(\n    \"order\", [\n        0,\n        1,\n        2,\n        3,\n        (0, 1),\n        (2, 3),\n    ]\n)\n@pytest.mark.parametrize(\n    \"sp_func, da_func\",\n    [\n        (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian),\n        (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian_filter),\n    ]\n)\ndef test_gaussian_derivative_filters_compare(sp_func, da_func,\n                                             order, sigma, truncate):\n    s = (100, 110)\n    a = np.arange(float(np.prod(s))).reshape(s)\n    d = da.from_array(a, chunks=(50, 55))\n\n    da.utils.assert_eq(\n        sp_func(a, sigma, order, truncate=truncate),\n        da_func(d, sigma, order, truncate=truncate)\n    )\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__generic.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.generic_filter,\n    ],\n)\n@pytest.mark.parametrize(\n    \"err_type, function, size, footprint, origin\",\n    [\n        (RuntimeError, lambda x: x, None, None, 0),\n        (TypeError, lambda x: x, 1.0, None, 0),\n        (RuntimeError, lambda x: x, (1,), None, 0),\n        (RuntimeError, lambda x: x, [(1,)], None, 0),\n        (RuntimeError, lambda x: x, 1, np.ones((1,)), 0),\n        (RuntimeError, lambda x: x, None, np.ones((1,)), 0),\n        (RuntimeError, lambda x: x, None, np.ones((1, 0)), 0),\n        (RuntimeError, lambda x: x, 1, None, (0,)),\n        (RuntimeError, lambda x: x, 1, None, [(0,)]),\n        (ValueError, lambda x: x, 1, None, 1),\n        (TypeError, lambda x: x, 1, None, 0.0),\n        (TypeError, lambda x: x, 1, None, (0.0, 0.0)),\n        (TypeError, lambda x: x, 1, None, 1 + 0j),\n        (TypeError, lambda x: x, 1, None, (0 + 0j, 1 + 0j)),\n    ],\n)\ndef test_generic_filters_params(da_func, err_type, function, size, footprint,\n                                origin):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    with pytest.raises(err_type):\n        da_func(d, function, size=size, footprint=footprint, origin=origin)\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.generic_filter,\n    ],\n)\ndef test_generic_filter_shape_type(da_func):\n    function = lambda x: x  # noqa: E731\n    size = 1\n\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    assert all([(type(s) is int) for s in d.shape])\n\n    d2 = da_func(d, function, size=size)\n\n    assert all([(type(s) is int) for s in d2.shape])\n\n\n@pytest.mark.parametrize(\n    \"sp_func, da_func\",\n    
[(scipy.ndimage.generic_filter, dask_image.ndfilters.generic_filter)],\n)\n@pytest.mark.parametrize(\n    \"function, size, footprint\",\n    [\n        (lambda x: x[0], 1, None),\n        (lambda x: x[0], (1, 1), None),\n        (lambda x: x[0], None, np.ones((1, 1))),\n    ],\n)\ndef test_generic_filter_identity(sp_func, da_func, function, size, footprint):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    da.utils.assert_eq(d, da_func(d, function, size=size, footprint=footprint))\n\n    da.utils.assert_eq(\n        sp_func(a, function, size=size, footprint=footprint),\n        da_func(d, function, size=size, footprint=footprint),\n    )\n\n\n@pytest.mark.parametrize(\n    \"da_func\",\n    [\n        dask_image.ndfilters.generic_filter,\n    ],\n)\ndef test_generic_filter_comprehensions(da_func):\n    da_wfunc = lambda arr: da_func(arr, lambda x: x[0], 1)  # noqa: E731\n\n    np.random.seed(0)\n\n    a = np.random.random((3, 12, 14))\n    d = da.from_array(a, chunks=(3, 6, 7))\n\n    l2s = [da_wfunc(d[i]) for i in range(len(d))]\n    l2c = [da_wfunc(d[i])[None] for i in range(len(d))]\n\n    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\n@pytest.mark.parametrize(\n    \"sp_func, da_func\",\n    [(scipy.ndimage.generic_filter, dask_image.ndfilters.generic_filter)],\n)\n@pytest.mark.parametrize(\n    \"function, size, footprint, origin\",\n    [\n        (lambda x: (np.array(x) ** 2).sum(), 2, None, 0),\n        (lambda x: (np.array(x) ** 2).sum(), None, np.ones((2, 3)), 0),\n        (lambda x: (np.array(x) ** 2).sum(), None, np.ones((2, 3)), (0, 1)),\n        (lambda x: (np.array(x) ** 2).sum(), None, np.ones((2, 3)), (0, -1)),\n        (\n            lambda x: (np.array(x) ** 2).sum(),\n            None,\n            (np.mgrid[-2: 2 + 1, -2: 2 + 1] ** 2).sum(axis=0) < 2.5 ** 2,\n            0,\n        ),\n        (\n            lambda x: (np.array(x) 
** 2).sum(),\n            None,\n            (np.mgrid[-2: 2 + 1, -2: 2 + 1] ** 2).sum(axis=0) < 2.5 ** 2,\n            (1, 2),\n        ),\n        (\n            lambda x: (np.array(x) ** 2).sum(),\n            None,\n            (np.mgrid[-2: 2 + 1, -2: 2 + 1] ** 2).sum(axis=0) < 2.5 ** 2,\n            (-1, -2),\n        ),\n        (lambda x: (np.array(x) ** 2).sum(), 5, None, 0),\n        (lambda x: (np.array(x) ** 2).sum(), 7, None, 0),\n        (lambda x: (np.array(x) ** 2).sum(), 8, None, 0),\n        (lambda x: (np.array(x) ** 2).sum(), 10, None, 0),\n        (lambda x: (np.array(x) ** 2).sum(), 5, None, 2),\n        (lambda x: (np.array(x) ** 2).sum(), 5, None, -2),\n    ],\n)\ndef test_generic_filter_compare(sp_func, da_func, function, size, footprint,\n                                origin):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    da.utils.assert_eq(\n        sp_func(a, function, size=size, footprint=footprint, origin=origin),\n        da_func(d, function, size=size, footprint=footprint, origin=origin),\n    )\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__order.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\n@pytest.mark.parametrize(\n    \"da_func, extra_kwargs\",\n    [\n        (dask_image.ndfilters.minimum_filter, {}),\n        (dask_image.ndfilters.median_filter, {}),\n        (dask_image.ndfilters.maximum_filter, {}),\n        (dask_image.ndfilters.rank_filter, {\"rank\": 0}),\n        (dask_image.ndfilters.percentile_filter, {\"percentile\": 0}),\n    ]\n)\n@pytest.mark.parametrize(\n    \"err_type, size, footprint, origin\",\n    [\n        (RuntimeError, None, None, 0),\n        (TypeError, 1.0, None, 0),\n        (RuntimeError, (1,), None, 0),\n        (RuntimeError, [(1,)], None, 0),\n        (RuntimeError, 1, np.ones((1,)), 0),\n        (RuntimeError, None, np.ones((1,)), 0),\n        (RuntimeError, None, np.ones((1, 0)), 0),\n        (RuntimeError, 1, None, (0,)),\n        (RuntimeError, 1, None, [(0,)]),\n        (ValueError, 1, None, 1),\n        (TypeError, 1, None, 0.0),\n        (TypeError, 1, None, (0.0, 0.0)),\n        (TypeError, 1, None, 1+0j),\n        (TypeError, 1, None, (0+0j, 1+0j)),\n    ]\n)\ndef test_order_filter_params(da_func,\n                             extra_kwargs,\n                             err_type,\n                             size,\n                             footprint,\n                             origin):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    with pytest.raises(err_type):\n        da_func(d,\n                size=size,\n                footprint=footprint,\n                origin=origin,\n                **extra_kwargs)\n\n\n@pytest.mark.parametrize(\n    \"da_func, extra_kwargs\",\n    [\n        (dask_image.ndfilters.minimum_filter, {}),\n        (dask_image.ndfilters.median_filter, {}),\n        (dask_image.ndfilters.maximum_filter, {}),\n        (dask_image.ndfilters.rank_filter, 
{\"rank\": 0}),\n        (dask_image.ndfilters.percentile_filter, {\"percentile\": 0}),\n    ]\n)\ndef test_ordered_filter_shape_type(da_func,\n                                   extra_kwargs):\n    size = 1\n\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    assert all([(type(s) is int) for s in d.shape])\n\n    d2 = da_func(d, size=size, **extra_kwargs)\n\n    assert all([(type(s) is int) for s in d2.shape])\n\n\n@pytest.mark.parametrize(\n    \"sp_func, da_func, extra_kwargs\",\n    [\n        (scipy.ndimage.minimum_filter,\n         dask_image.ndfilters.minimum_filter, {}),\n        (scipy.ndimage.median_filter, dask_image.ndfilters.median_filter, {}),\n        (scipy.ndimage.maximum_filter,\n         dask_image.ndfilters.maximum_filter, {}),\n        (scipy.ndimage.rank_filter,\n         dask_image.ndfilters.rank_filter, {\"rank\": 0}),\n        (scipy.ndimage.percentile_filter,\n         dask_image.ndfilters.percentile_filter, {\"percentile\": 0}),\n    ]\n)\n@pytest.mark.parametrize(\n    \"size, footprint\",\n    [\n        (1, None),\n        ((1, 1), None),\n        (None, np.ones((1, 1))),\n    ]\n)\ndef test_ordered_filter_identity(sp_func,\n                                 da_func,\n                                 extra_kwargs,\n                                 size,\n                                 footprint):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    da.utils.assert_eq(\n        d, da_func(d, size=size, footprint=footprint, **extra_kwargs)\n    )\n\n    da.utils.assert_eq(\n        sp_func(a, size=size, footprint=footprint, **extra_kwargs),\n        da_func(d, size=size, footprint=footprint, **extra_kwargs)\n    )\n\n\n@pytest.mark.parametrize(\n    \"da_func, kwargs\",\n    [\n        (dask_image.ndfilters.minimum_filter, {\"size\": 1}),\n        (dask_image.ndfilters.median_filter, {\"size\": 1}),\n        (dask_image.ndfilters.maximum_filter, {\"size\": 
1}),\n        (dask_image.ndfilters.rank_filter, {\"size\": 1, \"rank\": 0}),\n        (dask_image.ndfilters.percentile_filter, {\"size\": 1, \"percentile\": 0}),\n    ]\n)\ndef test_order_comprehensions(da_func, kwargs):\n    np.random.seed(0)\n\n    a = np.random.random((3, 12, 14))\n    d = da.from_array(a, chunks=(3, 6, 7))\n\n    l2s = [da_func(d[i], **kwargs) for i in range(len(d))]\n    l2c = [da_func(d[i], **kwargs)[None] for i in range(len(d))]\n\n    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\n@pytest.mark.parametrize(\n    \"sp_func, da_func, extra_kwargs\",\n    [\n        (scipy.ndimage.minimum_filter,\n         dask_image.ndfilters.minimum_filter, {}),\n        (scipy.ndimage.median_filter, dask_image.ndfilters.median_filter, {}),\n        (scipy.ndimage.maximum_filter,\n         dask_image.ndfilters.maximum_filter, {}),\n        (scipy.ndimage.rank_filter,\n         dask_image.ndfilters.rank_filter, {\"rank\": 1}),\n        (scipy.ndimage.percentile_filter,\n         dask_image.ndfilters.percentile_filter, {\"percentile\": 10}),\n    ]\n)\n@pytest.mark.parametrize(\n    \"size, footprint, origin\",\n    [\n        (2, None, 0),\n        (None, np.ones((2, 3)), 0),\n        (None, np.ones((2, 3)), (0, 1)),\n        (None, np.ones((2, 3)), (0, -1)),\n        (None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0),\n        (None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)),\n        (None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)),\n        (5, None, 0),\n        (7, None, 0),\n        (8, None, 0),\n        (10, None, 0),\n        (5, None, 2),\n        (5, None, -2),\n    ]\n)\ndef test_ordered_filter_compare(sp_func,\n                                da_func,\n                                extra_kwargs,\n                                size,\n                                footprint,\n                                
origin):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    da.utils.assert_eq(\n        sp_func(\n            a, size=size, footprint=footprint, origin=origin, **extra_kwargs\n        ),\n        da_func(\n            d, size=size, footprint=footprint, origin=origin, **extra_kwargs\n        )\n    )\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__smooth.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\n@pytest.mark.parametrize(\n    \"err_type, size, origin\",\n    [\n        (TypeError, 3.0, 0),\n        (TypeError, 3, 0.0),\n        (RuntimeError, [3], 0),\n        (RuntimeError, 3, [0]),\n        (RuntimeError, [[3]], 0),\n        (RuntimeError, 3, [[0]]),\n    ]\n)\ndef test_uniform_filter_params(err_type, size, origin):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    with pytest.raises(err_type):\n        dask_image.ndfilters.uniform_filter(d, size, origin=origin)\n\n\ndef test_uniform_shape_type():\n    size = 1\n    origin = 0\n\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    assert all([(type(s) is int) for s in d.shape])\n\n    d2 = dask_image.ndfilters.uniform_filter(d, size, origin=origin)\n\n    assert all([(type(s) is int) for s in d2.shape])\n\n\ndef test_uniform_comprehensions():\n    da_func = lambda arr: dask_image.ndfilters.uniform_filter(arr, 1, origin=0)  # noqa: E731, E501\n\n    np.random.seed(0)\n\n    a = np.random.random((3, 12, 14))\n    d = da.from_array(a, chunks=(3, 6, 7))\n\n    l2s = [da_func(d[i]) for i in range(len(d))]\n    l2c = [da_func(d[i])[None] for i in range(len(d))]\n\n    da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n    da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\n@pytest.mark.parametrize(\n    \"size, origin\",\n    [\n        (1, 0),\n    ]\n)\ndef test_uniform_identity(size, origin):\n    a = np.arange(140.0).reshape(10, 14)\n    d = da.from_array(a, chunks=(5, 7))\n\n    da.utils.assert_eq(\n        d, dask_image.ndfilters.uniform_filter(d, size, origin=origin)\n    )\n\n    da.utils.assert_eq(\n        scipy.ndimage.uniform_filter(a, size, origin=origin),\n        dask_image.ndfilters.uniform_filter(d, size, origin=origin)\n    
)\n\n\n@pytest.mark.parametrize(\n    \"size, origin\",\n    [\n        (2, 0),\n        (3, 0),\n        (3, 1),\n        (3, (1, 0)),\n        ((1, 2), 0),\n        ((3, 2), (1, 0)),\n    ]\n)\ndef test_uniform_compare(size, origin):\n    s = (100, 110)\n    a = np.arange(float(np.prod(s))).reshape(s)\n    d = da.from_array(a, chunks=(50, 55))\n\n    da.utils.assert_eq(\n        scipy.ndimage.uniform_filter(a, size, origin=origin),\n        dask_image.ndfilters.uniform_filter(d, size, origin=origin)\n    )\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__threshold.py",
    "content": "import dask.array as da\nimport numpy as np\nfrom numpy.testing import assert_equal\nimport pytest\n\nfrom dask_image.ndfilters import threshold_local\n\n\n@pytest.fixture\ndef simple_test_image():\n    image = da.from_array(np.array(\n        [[0, 0, 1, 3, 5],\n         [0, 1, 4, 3, 4],\n         [1, 2, 5, 4, 1],\n         [2, 4, 5, 2, 1],\n         [4, 5, 1, 0, 0]], dtype=int), chunks=(5, 5))\n    return image\n\n\n# ==================================================\n# Test Threshold Filters\n# ==================================================\n\n@pytest.mark.parametrize('block_size', [\n    3,\n    [3, 3],\n    np.array([3, 3]),\n    da.from_array(np.array([3, 3]), chunks=1),\n    da.from_array(np.array([3, 3]), chunks=2),\n])\ndef test_threshold_local_gaussian(simple_test_image, block_size):\n    ref = np.array(\n        [[False, False, False, False,  True],\n            [False, False,  True, False,  True],\n            [False, False,  True,  True, False],\n            [False,  True,  True, False, False],\n            [True,  True, False, False, False]]\n    )\n    out = threshold_local(simple_test_image, block_size, method='gaussian')\n    assert_equal(ref, (simple_test_image > out).compute())\n\n    out = threshold_local(\n        simple_test_image, block_size, method='gaussian', param=1./3.\n    )\n    assert_equal(ref, (simple_test_image > out).compute())\n\n\n@pytest.mark.parametrize('block_size', [\n    3,\n    [3, 3],\n    np.array([3, 3]),\n    da.from_array(np.array([3, 3]), chunks=1),\n    da.from_array(np.array([3, 3]), chunks=2),\n])\ndef test_threshold_local_mean(simple_test_image, block_size):\n    ref = np.array(\n        [[False, False, False, False,  True],\n            [False, False,  True, False,  True],\n            [False, False,  True,  True, False],\n            [False,  True,  True, False, False],\n            [True,  True, False, False, False]]\n    )\n    out = threshold_local(simple_test_image, block_size, 
method='mean')\n    assert_equal(ref, (simple_test_image > out).compute())\n\n\n@pytest.mark.parametrize('block_size', [\n    3,\n    [3, 3],\n    np.array([3, 3]),\n    da.from_array(np.array([3, 3]), chunks=1),\n    da.from_array(np.array([3, 3]), chunks=2),\n])\ndef test_threshold_local_median(simple_test_image, block_size):\n    ref = np.array(\n        [[False, False, False, False,  True],\n            [False, False,  True, False, False],\n            [False, False,  True, False, False],\n            [False, False,  True,  True, False],\n            [False,  True, False, False, False]]\n    )\n    out = threshold_local(simple_test_image, block_size, method='median')\n    assert_equal(ref, (simple_test_image > out).compute())\n\n\n# ==================================================\n# Test Generic Filters\n# ==================================================\n\ndef test_threshold_local_generic(simple_test_image):\n    ref = np.array(\n        [[1.,  7., 16., 29., 37.],\n         [5., 14., 23., 30., 30.],\n         [13., 24., 30., 29., 21.],\n         [25., 29., 28., 19., 10.],\n         [34., 31., 23., 10.,  4.]]\n    )\n    unchanged = threshold_local(\n        simple_test_image, 1, method='generic', param=sum\n    )\n    out = threshold_local(simple_test_image, 3, method='generic', param=sum)\n    assert np.allclose(unchanged.compute(), simple_test_image.compute())\n    assert np.allclose(out.compute(), ref)\n\n\ndef test_threshold_local_generic_invalid(simple_test_image):\n    expected_error_message = \"Must include a valid function to use as \"\n    \"the 'param' keyword argument.\"\n    with pytest.raises(ValueError) as e:\n        threshold_local(simple_test_image, 3, method='generic', param='sum')\n        assert e == expected_error_message\n\n\n# ==================================================\n# Test Invalid Arguments\n# ==================================================\n\n@pytest.mark.parametrize(\"method, block_size, error_type\", [\n    
('median', np.nan, TypeError),\n])\ndef test_nan_blocksize(simple_test_image, method, block_size, error_type):\n    with pytest.raises(error_type):\n        threshold_local(simple_test_image, block_size, method=method)\n\n\ndef test_invalid_threshold_method(simple_test_image):\n    with pytest.raises(ValueError):\n        threshold_local(simple_test_image, 3, method='invalid')\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test__utils.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport inspect\n\nimport pytest\nimport numpy as np\n\nfrom dask_image.ndfilters import _utils\n\n\ndef test__get_docstring():\n    f = lambda: 0  # noqa: E731\n\n    result = _utils._get_docstring(f)\n\n    expected = \"\"\"\n    Wrapped copy of \"{mod_name}.{func_name}\"\n\n\n    Excludes the output parameter as it would not work with Dask arrays.\n\n\n    Original docstring:\n\n    {doc}\n    \"\"\".format(\n        mod_name=inspect.getmodule(f).__name__,\n        func_name=f.__name__,\n        doc=\"\",\n    )\n\n    assert result == expected\n\n\ndef test__update_wrapper():\n    f = lambda: 0  # noqa: E731\n\n    @_utils._update_wrapper(f)\n    def g():\n        return f()\n\n    assert f.__name__ == g.__name__\n\n    expected = \"\"\"\n    Wrapped copy of \"{mod_name}.{func_name}\"\n\n\n    Excludes the output parameter as it would not work with Dask arrays.\n\n\n    Original docstring:\n\n    {doc}\n    \"\"\".format(\n        mod_name=inspect.getmodule(g).__name__,\n        func_name=g.__name__,\n        doc=\"\",\n    )\n\n    assert g.__doc__ == expected\n\n\n@pytest.mark.parametrize(\n    \"err_type, ndim, depth, boundary\",\n    [\n        (TypeError, lambda: 0, 1, None),\n        (TypeError, 1.0, 1, None),\n        (ValueError, -1, 1, None),\n        (TypeError, 1, lambda: 0, None),\n        (TypeError, 1, 1.0, None),\n        (ValueError, 1, -1, None),\n        (ValueError, 1, (1, 1), None),\n        (ValueError, 1, {0: 1, 1: 1}, None),\n        (TypeError, 1, {1}, None),\n        (TypeError, 1, 1, 1),\n        (ValueError, 1, 1, (None, None)),\n        (ValueError, 1, 1, {0: None, 1: None}),\n        (TypeError, 1, 1, (1,)),\n        (TypeError, 1, 1, {1}),\n    ]\n)\ndef test_errs__get_depth_boundary(err_type, ndim, depth, boundary):\n    with pytest.raises(err_type):\n        _utils._get_depth_boundary(ndim, depth, boundary)\n\n\n@pytest.mark.parametrize(\n    \"err_type, ndim, 
size\",\n    [\n        (TypeError, 1.0, 1),\n        (RuntimeError, 1, [[1]]),\n        (TypeError, 1, 1.0),\n        (TypeError, 1, [1.0]),\n        (RuntimeError, 1, [1, 1]),\n    ]\n)\ndef test_errs__get_size(err_type, ndim, size):\n    with pytest.raises(err_type):\n        _utils._get_size(ndim, size)\n\n\n@pytest.mark.parametrize(\n    \"err_type, size, origin\",\n    [\n        (TypeError, [1], 1.0),\n        (TypeError, [1], [1.0]),\n        (RuntimeError, [1], [[1]]),\n        (RuntimeError, [1], [1, 1]),\n        (ValueError, [1], [2]),\n    ]\n)\ndef test_errs__get_origin(err_type, size, origin):\n    with pytest.raises(err_type):\n        _utils._get_origin(size, origin)\n\n\n@pytest.mark.parametrize(\n    \"err_type, ndim, size, footprint\",\n    [\n        (RuntimeError, 1, None, None),\n        (RuntimeError, 1, [2], np.ones((2,), dtype=bool)),\n        (RuntimeError, 1, None, np.ones((1, 2), dtype=bool)),\n        (RuntimeError, 1, None, np.ones([0], dtype=bool)),\n    ]\n)\ndef test_errs__get_footprint(err_type, ndim, size, footprint):\n    with pytest.raises(err_type):\n        _utils._get_footprint(ndim, size=size, footprint=footprint)\n\n\n@pytest.mark.parametrize(\n    \"expected, ndim, depth, boundary\",\n    [\n        (({0: 0}, {0: \"none\"}), 1, 0, \"none\"),\n        (({0: 0}, {0: \"reflect\"}), 1, 0, \"reflect\"),\n        (({0: 0}, {0: \"periodic\"}), 1, 0, \"periodic\"),\n        (({0: 1}, {0: \"none\"}), 1, 1, \"none\"),\n    ]\n)\ndef test__get_depth_boundary(expected, ndim, depth, boundary):\n    assert expected == _utils._get_depth_boundary(ndim, depth, boundary)\n\n\n@pytest.mark.parametrize(\n    \"expected, ndim, size\",\n    [\n        ((1,), 1, 1),\n        ((3, 3), 2, 3),\n        ((2, 4), 2, (2, 4)),\n    ]\n)\ndef test__get_size(expected, ndim, size):\n    assert expected == _utils._get_size(ndim, size)\n\n\n@pytest.mark.parametrize(\n    \"expected, size, origin\",\n    [\n        ((0,), (1,), 0),\n        ((1,), (3,), 
1),\n        ((1, 2), (3, 5), (1, 2)),\n    ]\n)\ndef test__get_origin(expected, size, origin):\n    assert expected == _utils._get_origin(size, origin)\n\n\n@pytest.mark.parametrize(\n    \"expected, size, origin\",\n    [\n        ((0,), (1,), 0),\n        ((1,), (3,), 0),\n        ((2,), (3,), 1),\n        ((2, 4), (3, 5), (1, 2)),\n    ]\n)\ndef test__get_depth(expected, size, origin):\n    assert expected == _utils._get_depth(size, origin)\n\n\n@pytest.mark.parametrize(\n    \"expected, ndim, size, footprint\",\n    [\n        (np.ones((2,), dtype=bool), 1, 2, None),\n        (np.ones((2,), dtype=bool), 1, None, np.ones((2,), dtype=bool)),\n    ]\n)\ndef test__get_footprint(expected, ndim, size, footprint):\n    assert (expected == _utils._get_footprint(ndim, size, footprint)).all()\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test_cupy_ndfilters.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport dask.array as da\nimport numpy as np\nimport pytest\n\nimport dask_image.ndfilters\n\ncupy = pytest.importorskip(\"cupy\", minversion=\"8.0.0\")\n\n\n@pytest.fixture\ndef array():\n    s = (10, 10)\n    a = da.from_array(cupy.arange(int(np.prod(s)),\n                      dtype=cupy.float32).reshape(s), chunks=5)\n    return a\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"func\", [\n    dask_image.ndfilters.convolve,\n    dask_image.ndfilters.correlate,\n])\ndef test_cupy_conv(array, func):\n    \"\"\"Test convolve & correlate filters with cupy input arrays.\"\"\"\n    weights = cupy.ones(array.ndim * (3,), dtype=cupy.float32)\n    result = func(array, weights)\n    result.compute()\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"func\", [\n    dask_image.ndfilters.laplace,\n])\ndef test_cupy_diff(array, func):\n    result = func(array)\n    result.compute()\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"func\", [\n    dask_image.ndfilters.prewitt,\n    dask_image.ndfilters.sobel,\n])\ndef test_cupy_edge(array, func):\n    result = func(array)\n    result.compute()\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"func\", [\n    dask_image.ndfilters.gaussian,\n    dask_image.ndfilters.gaussian_filter,\n    dask_image.ndfilters.gaussian_gradient_magnitude,\n    dask_image.ndfilters.gaussian_laplace,\n])\ndef test_cupy_gaussian(array, func):\n    sigma = 1\n    result = func(array, sigma)\n    result.compute()\n\n\n@pytest.mark.parametrize(\n    \"size, footprint\",\n    [\n        (1, None),\n        ((1, 1), None),\n        (None, np.ones((1, 1))),\n    ]\n)\ndef test_cupy_generic(array, size, footprint):\n    my_sum = cupy.ReductionKernel(\n        'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')\n    result = dask_image.ndfilters.generic_filter(array, my_sum, size=size,\n                                                 footprint=footprint)\n    
result.compute()\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"func, extra_arg, size\", [\n    (dask_image.ndfilters.minimum_filter, None, 3),\n    (dask_image.ndfilters.median_filter, None, 3),\n    (dask_image.ndfilters.maximum_filter, None, 3),\n    (dask_image.ndfilters.rank_filter, 5, 3),\n    (dask_image.ndfilters.percentile_filter, 50, 3),\n])\ndef test_cupy_order(array, func, extra_arg, size):\n    if extra_arg is not None:\n        result = func(array, extra_arg, size=size)\n    else:\n        result = func(array, size=size)\n    result.compute()\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"func\", [\n    dask_image.ndfilters.uniform_filter,\n])\ndef test_cupy_smooth(array, func):\n    result = func(array)\n    result.compute()\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfilters/test_cupy_threshold.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport dask.array as da\nimport numpy as np\nimport pytest\n\nfrom dask_image.ndfilters import threshold_local\n\ncupy = pytest.importorskip(\"cupy\", minversion=\"5.0.0\")\n\n\n@pytest.fixture\ndef simple_test_image():\n    image = da.from_array(cupy.array(\n        [[0, 0, 1, 3, 5],\n         [0, 1, 4, 3, 4],\n         [1, 2, 5, 4, 1],\n         [2, 4, 5, 2, 1],\n         [4, 5, 1, 0, 0]], dtype=int), chunks=(5, 5))\n    return image\n\n\n# ==================================================\n# Test Threshold Filters\n# ==================================================\n\n@pytest.mark.cupy\n@pytest.mark.parametrize('block_size', [\n    3,\n    [3, 3],\n    np.array([3, 3]),\n    da.from_array(np.array([3, 3]), chunks=1),\n    da.from_array(np.array([3, 3]), chunks=2),\n])\ndef test_threshold_local_gaussian(simple_test_image, block_size):\n    ref = np.array(\n        [[False, False, False, False,  True],\n            [False, False,  True, False,  True],\n            [False, False,  True,  True, False],\n            [False,  True,  True, False, False],\n            [True,  True, False, False, False]]\n    )\n    out = threshold_local(simple_test_image, block_size, method='gaussian')\n    cupy.testing.assert_array_equal(ref, (simple_test_image > out).compute())\n\n    out = threshold_local(\n        simple_test_image, block_size, method='gaussian', param=1./3.\n    )\n    cupy.testing.assert_array_equal(ref, (simple_test_image > out).compute())\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize('block_size', [\n    3,\n    [3, 3],\n    np.array([3, 3]),\n    da.from_array(np.array([3, 3]), chunks=1),\n    da.from_array(np.array([3, 3]), chunks=2),\n])\ndef test_threshold_local_mean(simple_test_image, block_size):\n    ref = cupy.array(\n        [[False, False, False, False,  True],\n            [False, False,  True, False,  True],\n            [False, False,  True,  True, False],\n            [False,  
True,  True, False, False],\n            [True,  True, False, False, False]]\n    )\n    out = threshold_local(simple_test_image, block_size, method='mean')\n    cupy.testing.assert_array_equal(ref, (simple_test_image > out).compute())\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize('block_size', [\n    3,\n    [3, 3],\n    np.array([3, 3]),\n    da.from_array(np.array([3, 3]), chunks=1),\n    da.from_array(np.array([3, 3]), chunks=2),\n])\ndef test_threshold_local_median(simple_test_image, block_size):\n    ref = cupy.array(\n        [[False, False, False, False,  True],\n            [False, False,  True, False, False],\n            [False, False,  True, False, False],\n            [False, False,  True,  True, False],\n            [False,  True, False, False, False]]\n    )\n    out = threshold_local(simple_test_image, block_size, method='median')\n    cupy.testing.assert_array_equal(ref, (simple_test_image > out).compute())\n\n\n# ==================================================\n# Test Generic Filters\n# ==================================================\n\ndef test_threshold_local_generic(simple_test_image):\n    ref = cupy.array(\n        [[1.,  7., 16., 29., 37.],\n            [5., 14., 23., 30., 30.],\n            [13., 24., 30., 29., 21.],\n            [25., 29., 28., 19., 10.],\n            [34., 31., 23., 10.,  4.]]\n    )\n    my_sum = cupy.ReductionKernel(\n        'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')\n    unchanged = threshold_local(simple_test_image, 1, method='generic', param=my_sum)  # noqa: E501\n    out = threshold_local(simple_test_image, 3, method='generic', param=my_sum)\n    assert cupy.allclose(unchanged.compute(), simple_test_image.compute())\n    assert cupy.allclose(out.compute(), ref)\n\n\ndef test_threshold_local_generic_invalid(simple_test_image):\n    expected_error_message = \"Must include a valid function to use as \"\n    \"the 'param' keyword argument.\"\n    with pytest.raises(ValueError) as e:\n        
threshold_local(simple_test_image, 3, method='generic', param='sum')\n        assert e == expected_error_message\n\n\n# ==================================================\n# Test Invalid Arguments\n# ==================================================\n\n@pytest.mark.parametrize(\"method, block_size, error_type\", [\n    ('median', cupy.nan, TypeError),\n])\ndef test_nan_blocksize(simple_test_image, method, block_size, error_type):\n    with pytest.raises(error_type):\n        threshold_local(simple_test_image, block_size, method=method)\n\n\ndef test_invalid_threshold_method(simple_test_image):\n    with pytest.raises(ValueError):\n        threshold_local(simple_test_image, 3, method='invalid')\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfourier/test__utils.py",
    "content": "# -*- coding: utf-8 -*-\nimport numbers\n\nimport pytest\n\nimport dask.array as da\n\nimport dask_image.ndfourier._utils\n\n\n@pytest.mark.parametrize(\n    \"a, s, n, axis\", [\n        (da.ones((3, 4), chunks=(3, 4)), da.ones((2,), chunks=(2,)), -1, -1),\n        (da.ones((3, 4), dtype='i8', chunks=(3, 4)),\n         da.ones((2,), dtype='i8', chunks=(2,)), -1, -1),\n    ]\n)\ndef test_norm_args(a, s, n, axis):\n    a2, s2, n2, axis2 = dask_image.ndfourier._utils._norm_args(\n        a, s, n=n, axis=axis\n    )\n\n    assert isinstance(a2, da.Array)\n    assert isinstance(s2, da.Array)\n\n    assert issubclass(a2.real.dtype.type, numbers.Real)\n    assert issubclass(s2.dtype.type, numbers.Real)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndfourier/test_core.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numbers\n\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\nimport dask_image.ndfourier\n\n\n@pytest.mark.parametrize(\n    \"err_type, s, n\",\n    [\n        (NotImplementedError, 0.0, 0),\n        (TypeError, 0.0 + 0.0j, 0),\n        (TypeError, {}, 0),\n        (RuntimeError, [0.0], 0),\n        (RuntimeError, [[0.0], [0.0]], 0),\n        (TypeError, [0.0, 0.0 + 0.0j], 0),\n        (NotImplementedError, 0, 0),\n    ]\n)\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"fourier_shift\",\n        \"fourier_gaussian\",\n        \"fourier_uniform\",\n    ]\n)\ndef test_fourier_filter_err(funcname, err_type, s, n):\n    da_func = getattr(dask_image.ndfourier, funcname)\n\n    a = np.arange(140.0).reshape(10, 14).astype(complex)\n    d = da.from_array(a, chunks=(5, 7))\n\n    with pytest.raises(err_type):\n        da_func(d, s, n)\n\n\n@pytest.mark.parametrize(\n    \"s\",\n    [\n        0,\n        (0, 0),\n    ]\n)\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"fourier_shift\",\n        \"fourier_gaussian\",\n    ]\n)\ndef test_fourier_filter_identity(funcname, s):\n    da_func = getattr(dask_image.ndfourier, funcname)\n    sp_func = getattr(scipy.ndimage, funcname)\n\n    a = np.arange(140.0).reshape(10, 14).astype(complex)\n    d = da.from_array(a, chunks=(5, 7))\n\n    r_a = sp_func(a, s)\n    r_d = da_func(d, s)\n\n    assert d.chunks == r_d.chunks\n\n    da.utils.assert_eq(d, r_d)\n    da.utils.assert_eq(r_a, r_d)\n\n\n@pytest.mark.parametrize(\n    \"dtype\",\n    [\n        np.int64,\n        np.float32,\n        np.float64,\n        np.complex64,\n        np.complex128,\n    ]\n)\n@pytest.mark.parametrize(\n    \"funcname, upcast_type\",\n    [\n        (\"fourier_shift\", numbers.Real),\n        (\"fourier_gaussian\", numbers.Integral),\n        (\"fourier_uniform\", numbers.Integral),\n    ]\n)\ndef 
test_fourier_filter_type(funcname, upcast_type, dtype):\n    if (\n            dtype in [np.int64, np.float64] and\n            funcname in [\"fourier_gaussian\", \"fourier_uniform\"]\n       ):\n        pytest.skip(\n            \"SciPy 1.0.0+ doesn't handle double precision values correctly.\"\n        )\n\n    dtype = np.dtype(dtype).type\n\n    s = 1\n\n    da_func = getattr(dask_image.ndfourier, funcname)\n    sp_func = getattr(scipy.ndimage, funcname)\n\n    a = np.arange(140.0).reshape(10, 14).astype(dtype)\n    d = da.from_array(a, chunks=(5, 7))\n\n    r_a = sp_func(a, s)\n    r_d = da_func(d, s)\n\n    assert d.chunks == r_d.chunks\n\n    da.utils.assert_eq(r_a, r_d)\n\n    if issubclass(dtype, upcast_type):\n        assert r_d.real.dtype.type is np.float64\n    else:\n        assert r_d.dtype.type is dtype\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks\",\n    [\n        ((10, 14), (10, 14)),\n        ((10, 14), (5, 7)),\n        ((10, 14), (6, 8)),\n        ((10, 14), (4, 6)),\n        ((16,), (3, 6, 2, 5)),\n    ]\n)\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"fourier_shift\",\n        \"fourier_gaussian\",\n        \"fourier_uniform\",\n    ]\n)\ndef test_fourier_filter_chunks(funcname, shape, chunks):\n    dtype = np.dtype(complex).type\n\n    s = 1\n\n    da_func = getattr(dask_image.ndfourier, funcname)\n    sp_func = getattr(scipy.ndimage, funcname)\n\n    a = np.arange(np.prod(shape)).reshape(shape).astype(dtype)\n    d = da.from_array(a, chunks=chunks)\n\n    r_a = sp_func(a, s)\n    r_d = da_func(d, s)\n\n    assert d.chunks == r_d.chunks\n\n    da.utils.assert_eq(r_a, r_d)\n\n\n@pytest.mark.parametrize(\n    \"s\",\n    [\n        -1,\n        (-1, -1),\n        (-1, 2),\n        (10, -9),\n        (1, 0),\n        (0, 2),\n    ]\n)\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"fourier_shift\",\n        \"fourier_gaussian\",\n    ]\n)\ndef test_fourier_filter_non_positive(funcname, s):\n    da_func = 
getattr(dask_image.ndfourier, funcname)\n    sp_func = getattr(scipy.ndimage, funcname)\n\n    a = np.arange(140.0).reshape(10, 14).astype(complex)\n    d = da.from_array(a, chunks=(5, 7))\n\n    r_a = sp_func(a, s)\n    r_d = da_func(d, s)\n\n    assert d.chunks == r_d.chunks\n\n    da.utils.assert_eq(r_a, r_d)\n\n\n@pytest.mark.parametrize(\n    \"s\",\n    [\n        1,\n        0.5,\n        (1, 1),\n        (0.8, 1.5),\n        np.ones((2,)),\n        da.ones((2,), chunks=(2,)),\n        da.ones((2,), chunks=(1,)),\n    ]\n)\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"fourier_shift\",\n        \"fourier_gaussian\",\n        \"fourier_uniform\",\n    ]\n)\n@pytest.mark.parametrize(\n    \"real_fft, axis\",\n    [\n        (True, -1),\n        (True, 0),\n        (False, -1),\n    ]\n)\ndef test_fourier_filter(funcname, s, real_fft, axis):\n    da_func = getattr(dask_image.ndfourier, funcname)\n    sp_func = getattr(scipy.ndimage, funcname)\n\n    shape = (10, 14)\n    n = 2 * shape[axis] - 1 if real_fft else -1\n    dtype = np.float64 if real_fft else np.complex128\n\n    a = np.arange(140.0).reshape(shape).astype(dtype)\n    d = da.from_array(a, chunks=(5, 7))\n\n    r_a = sp_func(a, s, n=n, axis=axis)\n    r_d = da_func(d, s, n=n, axis=axis)\n\n    assert d.chunks == r_d.chunks\n\n    da.utils.assert_eq(r_a, r_d)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndinterp/test_affine_transformation.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom packaging import version\n\nimport dask\nimport dask.array as da\nimport numpy as np\nimport pytest\nimport scipy\nimport scipy.ndimage\n\nimport dask_image.ndinterp\n\n# mode lists for the case with prefilter = False\n_supported_modes = ['constant', 'nearest']\n_unsupported_modes = ['wrap', 'reflect', 'mirror']\n\n# mode lists for the case with prefilter = True\n_supported_prefilter_modes = ['constant']\n_unsupported_prefilter_modes = _unsupported_modes + ['nearest']\n\nhave_scipy16 = version.parse(scipy.__version__) >= version.parse('1.6.0')\n\n# additional modes are present in SciPy >= 1.6.0\nif have_scipy16:\n    _supported_modes += ['grid-constant']\n    _unsupported_modes += ['grid-mirror', 'grid-wrap']\n    _unsupported_prefilter_modes += ['grid-constant', 'grid-mirror',\n                                     'grid-wrap']\n\n\ndef validate_affine_transform(n=2,\n                              matrix=None,\n                              offset=None,\n                              input_output_shape_per_dim=(16, 16),\n                              interp_order=1,\n                              interp_mode='constant',\n                              input_output_chunksize_per_dim=(6, 6),\n                              random_seed=0,\n                              use_cupy=False,\n                              prefilter=False\n                              ):\n    \"\"\"\n    Compare the outputs of `scipy.ndimage.affine_transformation`\n    and `dask_image.ndinterp.affine_transformation`.\n\n    Notes\n    -----\n        Currently, prefilter is disabled and therefore the output\n        of `dask_image.ndinterp.affine_transformation` is compared\n        to `prefilter=False`.\n    \"\"\"\n\n    if (interp_order > 1 and interp_mode == 'nearest' and not have_scipy16):\n        # not clear on the underlying cause, but this fails on older SciPy\n        pytest.skip(\"requires SciPy >= 1.6.0\")\n\n    # 
define test image\n    a = input_output_shape_per_dim[0]\n    np.random.seed(random_seed)\n    image = np.random.random([a] * n)\n\n    # transform into dask array\n    chunksize = [input_output_chunksize_per_dim[0]] * n\n    image_da = da.from_array(image, chunks=chunksize)\n    if use_cupy:\n        import cupy as cp\n        image_da = image_da.map_blocks(cp.asarray)\n\n    if (\n        prefilter\n        and interp_mode in _supported_prefilter_modes\n        and interp_order > 1\n        and version.parse(dask.__version__) < version.parse(\"2020.1.0\")\n    ):\n        # older dask will fail if any chunks have size smaller than depth\n        depth = dask_image.ndinterp._get_default_depth(interp_order)\n        in_size = input_output_shape_per_dim[0]\n        in_chunksize = input_output_chunksize_per_dim[0]\n        rem = in_size % in_chunksize\n        if in_size < depth or (rem != 0 and rem < depth):\n            pytest.skip(\"older dask doesn't automatically rechunk\")\n\n    # define (random) transformation\n    if matrix is None:\n        # make sure to substantially deviate from unity matrix\n        matrix = np.eye(n) + (np.random.random((n, n)) - 0.5) * 5.\n    if offset is None:\n        offset = (np.random.random(n) - 0.5) / 5. 
* np.array(image.shape)\n\n    # define resampling options\n    output_shape = [input_output_shape_per_dim[1]] * n\n    output_chunks = [input_output_chunksize_per_dim[1]] * n\n\n    # transform with scipy\n    image_t_scipy = scipy.ndimage.affine_transform(\n        image, matrix, offset,\n        output_shape=output_shape,\n        order=interp_order,\n        mode=interp_mode,\n        prefilter=prefilter)\n\n    # transform with dask-image\n    image_t_dask = dask_image.ndinterp.affine_transform(\n        image_da, matrix, offset,\n        output_shape=output_shape,\n        output_chunks=output_chunks,\n        order=interp_order,\n        mode=interp_mode,\n        prefilter=prefilter)\n    image_t_dask_computed = image_t_dask.compute()\n\n    assert np.allclose(image_t_scipy, image_t_dask_computed)\n\n\n@pytest.mark.parametrize(\"n\",\n                         [1, 2, 3])\n@pytest.mark.parametrize(\"input_output_shape_per_dim\",\n                         [(25, 25)])\n@pytest.mark.parametrize(\"interp_order\",\n                         range(6))\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n                         [(16, 16), (16, 7), (7, 16)])\n@pytest.mark.parametrize(\"random_seed\",\n                         [0, 2])\ndef test_affine_transform_general(n,\n                                  input_output_shape_per_dim,\n                                  interp_order,\n                                  input_output_chunksize_per_dim,\n                                  random_seed):\n\n    kwargs = dict()\n    kwargs['n'] = n\n    kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim\n    kwargs['interp_order'] = interp_order\n    kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n    kwargs['random_seed'] = random_seed\n\n    validate_affine_transform(**kwargs)\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"n\",\n                         [1, 2, 
3])\n@pytest.mark.parametrize(\"input_output_shape_per_dim\",\n                         [(25, 25), (25, 10)])\n@pytest.mark.parametrize(\"interp_order\",\n                         [0, 1])\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n                         [(16, 16), (16, 7)])\n@pytest.mark.parametrize(\"random_seed\",\n                         [0])\ndef test_affine_transform_cupy(n,\n                               input_output_shape_per_dim,\n                               interp_order,\n                               input_output_chunksize_per_dim,\n                               random_seed):\n    pytest.importorskip(\"cupy\", minversion=\"5.0.0\")\n\n    kwargs = dict()\n    kwargs['n'] = n\n    kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim\n    kwargs['interp_order'] = interp_order\n    kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n    kwargs['random_seed'] = random_seed\n    kwargs['use_cupy'] = True\n\n    validate_affine_transform(**kwargs)\n\n\n@pytest.mark.parametrize(\"n\",\n                         [1, 2, 3])\n@pytest.mark.parametrize(\"interp_mode\",\n                         _supported_modes)\n@pytest.mark.parametrize(\"interp_order\",\n                         [0, 3])\n@pytest.mark.parametrize(\"input_output_shape_per_dim\",\n                         [(20, 30)])\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n                         [(15, 10)])\ndef test_affine_transform_modes(n,\n                                interp_mode,\n                                interp_order,\n                                input_output_shape_per_dim,\n                                input_output_chunksize_per_dim,\n                                ):\n\n    kwargs = dict()\n    kwargs['n'] = n\n    kwargs['interp_mode'] = interp_mode\n    kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim\n    kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n    
kwargs['interp_order'] = interp_order\n    kwargs['prefilter'] = False\n\n    validate_affine_transform(**kwargs)\n\n\n@pytest.mark.parametrize(\"interp_mode\",\n                         _unsupported_modes)\ndef test_affine_transform_unsupported_modes(interp_mode):\n\n    with pytest.raises(NotImplementedError):\n        validate_affine_transform(interp_mode=interp_mode)\n\n\n@pytest.mark.parametrize(\"n\", [1, 2, 3])\n@pytest.mark.parametrize(\"interp_order\", range(6))\n@pytest.mark.parametrize(\"interp_mode\", _supported_prefilter_modes)\ndef test_affine_transform_prefilter_modes(n, interp_order, interp_mode):\n\n    validate_affine_transform(\n        n=n,\n        input_output_shape_per_dim=(32, 32),\n        input_output_chunksize_per_dim=(24, 24),\n        interp_order=interp_order,\n        interp_mode=interp_mode,\n        prefilter=True,\n    )\n\n\n@pytest.mark.parametrize(\"n\", [1, 2, 3])\n@pytest.mark.parametrize(\"interp_order\", range(2, 6))\n@pytest.mark.parametrize(\"interp_mode\", _unsupported_prefilter_modes)\ndef test_affine_transform_prefilter_not_implemented(\n    n, interp_order, interp_mode\n):\n\n    with pytest.raises(NotImplementedError):\n        validate_affine_transform(\n            n=n,\n            interp_order=interp_order,\n            interp_mode=interp_mode,\n            prefilter=True,\n        )\n\n\ndef test_affine_transform_numpy_input():\n\n    image = np.ones((3, 3))\n    image_t = dask_image.ndinterp.affine_transform(image, np.eye(2), [0, 0])\n\n    assert image_t.shape == image.shape\n    assert (image == image_t).min()\n\n\ndef test_affine_transform_minimal_input():\n\n    image = np.ones((3, 3))\n    image_t = dask_image.ndinterp.affine_transform(np.ones((3, 3)), np.eye(2))\n\n    assert image_t.shape == image.shape\n\n\ndef test_affine_transform_type_consistency():\n\n    image = da.ones((3, 3))\n    image_t = dask_image.ndinterp.affine_transform(image, np.eye(2), [0, 0])\n\n    assert isinstance(image, 
type(image_t))\n    assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute()))\n\n\n@pytest.mark.cupy\ndef test_affine_transform_type_consistency_gpu():\n\n    cupy = pytest.importorskip(\"cupy\", minversion=\"5.0.0\")\n\n    image = da.ones((3, 3))\n    image_t = dask_image.ndinterp.affine_transform(image, np.eye(2), [0, 0])\n\n    image.map_blocks(cupy.asarray)\n\n    assert isinstance(image, type(image_t))\n    assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute()))\n\n\ndef test_affine_transform_no_output_shape_or_chunks_specified():\n\n    image = da.ones((3, 3))\n    image_t = dask_image.ndinterp.affine_transform(image, np.eye(2), [0, 0])\n\n    assert image_t.shape == image.shape\n    assert image_t.chunks == tuple([(s,) for s in image.shape])\n\n\n@pytest.mark.timeout(15)\ndef test_affine_transform_large_input_small_output_cpu():\n    \"\"\"\n    Make sure input array does not need to be computed entirely\n    \"\"\"\n\n    # fully computed, this array would occupy 8TB\n    image = da.random.random([10000] * 3, chunks=(200, 200, 200))\n    image_t = dask_image.ndinterp.affine_transform(image, np.eye(3), [0, 0, 0],\n                                                   output_chunks=[1, 1, 1],\n                                                   output_shape=[1, 1, 1])\n\n    # if more than the needed chunks should be computed,\n    # this would take long and eventually raise a MemoryError\n    image_t[0, 0, 0].compute()\n\n\n@pytest.mark.cupy\n@pytest.mark.timeout(15)\ndef test_affine_transform_large_input_small_output_gpu():\n    \"\"\"\n    Make sure input array does not need to be computed entirely\n    \"\"\"\n    cupy = pytest.importorskip(\"cupy\", minversion=\"5.0.0\")\n\n    # this array would occupy more than 24GB on a GPU\n    image = da.random.random([2000] * 3, chunks=(50, 50, 50))\n    image.map_blocks(cupy.asarray)\n\n    image_t = dask_image.ndinterp.affine_transform(image, np.eye(3), [0, 0, 0],\n                      
                             output_chunks=[1, 1, 1],\n                                                   output_shape=[1, 1, 1])\n    # if more than the needed chunks should be computed,\n    # this would take long and eventually raise a MemoryError\n    image_t[0, 0, 0].compute()\n\n\n@pytest.mark.filterwarnings(\"ignore:The behavior of affine_transform \"\n                            \"with a 1-D array supplied for the matrix \"\n                            \"parameter has changed\")\n@pytest.mark.parametrize(\"n\",\n                         [1, 2, 3, 4])\ndef test_affine_transform_parameter_formats(n):\n\n    # define reference parameters\n    scale_factors = np.ones(n, dtype=float) * 2.\n    matrix_n = np.diag(scale_factors)\n    offset = -np.ones(n)\n\n    # convert into different formats\n    matrix_only_scaling = scale_factors\n    matrix_pre_homogeneous = np.hstack((matrix_n, offset[:, None]))\n    matrix_homogeneous = np.vstack((matrix_pre_homogeneous,\n                                   [0] * n + [1]))\n\n    np.random.seed(0)\n    image = da.random.random([5] * n)\n\n    # reference run\n    image_t_0 = dask_image.ndinterp.affine_transform(image,\n                                                     matrix_n,\n                                                     offset).compute()\n\n    # assert that the different parameter formats\n    # lead to the same output\n    image_t_scale = dask_image.ndinterp.affine_transform(image,\n                                                         matrix_only_scaling,\n                                                         offset).compute()\n    assert np.allclose(image_t_0, image_t_scale)\n\n    for matrix in [matrix_pre_homogeneous, matrix_homogeneous]:\n\n        image_t = dask_image.ndinterp.affine_transform(image,\n                                                       matrix,\n                                                       offset + 10.,  # ignored\n                                                       
).compute()\n\n        assert np.allclose(image_t_0, image_t)\n\n    # catch matrices that are not homogeneous transformation matrices\n    with pytest.raises(ValueError):\n        matrix_not_homogeneous = np.vstack((matrix_pre_homogeneous,\n                                           [-1] * n + [1]))\n        dask_image.ndinterp.affine_transform(image,\n                                             matrix_not_homogeneous,\n                                             offset)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndinterp/test_map_coordinates.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom packaging import version\n\nimport dask.array as da\nimport numpy as np\nimport pytest\nimport scipy\nimport scipy.ndimage\n\nimport dask_image.ndinterp\n\n# mode lists for the case with prefilter = False\n_supported_modes = ['constant', 'nearest']\n_unsupported_modes = ['wrap', 'reflect', 'mirror']\n\n# mode lists for the case with prefilter = True\n_supported_prefilter_modes = ['constant']\n_unsupported_prefilter_modes = _unsupported_modes + ['nearest']\n\nhave_scipy16 = version.parse(scipy.__version__) >= version.parse('1.6.0')\n\n# additional modes are present in SciPy >= 1.6.0\nif have_scipy16:\n    _supported_modes += ['grid-constant']\n    _unsupported_modes += ['grid-mirror', 'grid-wrap']\n    _unsupported_prefilter_modes += ['grid-constant', 'grid-mirror',\n                                     'grid-wrap']\n\n\ndef validate_map_coordinates_general(n=2,\n                                     interp_order=1,\n                                     interp_mode='constant',\n                                     coord_len=12,\n                                     coord_chunksize=6,\n                                     coord_offset=0.,\n                                     im_shape_per_dim=12,\n                                     im_chunksize_per_dim=6,\n                                     random_seed=0,\n                                     prefilter=False,\n                                     ):\n\n    if interp_order > 1 and interp_mode == 'nearest' and not have_scipy16:\n        # not clear on the underlying cause, but this fails on older SciPy\n        pytest.skip(\"requires SciPy >= 1.6.0\")\n\n    # define test input\n    np.random.seed(random_seed)\n    input = np.random.random([im_shape_per_dim] * n)\n    input_da = da.from_array(input, chunks=im_chunksize_per_dim)\n\n    # define test coordinates\n    coords = np.random.random((n, coord_len)) * im_shape_per_dim + coord_offset\n    
coords_da = da.from_array(coords, chunks=(n, coord_chunksize))\n\n    # ndimage result\n    mapped_scipy = scipy.ndimage.map_coordinates(\n        input,\n        coords,\n        order=interp_order,\n        mode=interp_mode,\n        cval=0.0,\n        prefilter=prefilter)\n\n    # dask-image results\n    for input_array in [input, input_da]:\n        for coords_array in [coords, coords_da]:\n            mapped_dask = dask_image.ndinterp.map_coordinates(\n                input_array,\n                coords_array,\n                order=interp_order,\n                mode=interp_mode,\n                cval=0.0,\n                prefilter=prefilter)\n\n            mapped_dask_computed = mapped_dask.compute()\n\n            assert np.allclose(mapped_scipy, mapped_dask_computed)\n\n\n@pytest.mark.parametrize(\"n\",\n                         [1, 2, 3, 4])\n@pytest.mark.parametrize(\"random_seed\",\n                         range(2))\ndef test_map_coordinates_basic(n,\n                               random_seed,\n                               ):\n\n    kwargs = dict()\n    kwargs['n'] = n\n    kwargs['random_seed'] = random_seed\n\n    validate_map_coordinates_general(**kwargs)\n\n\n@pytest.mark.timeout(3)\ndef test_map_coordinates_large_input():\n\n    \"\"\"\n    This test assesses whether relatively large\n    inputs are processed before timeout.\n    \"\"\"\n\n    # define large test image\n    image_da = da.random.random([1000] * 3, chunks=200)\n\n    # define sparse test coordinates\n    coords = np.random.random((3, 2)) * 1000\n\n    # dask-image result\n    dask_image.ndinterp.map_coordinates(\n        image_da,\n        coords).compute()\n\n\n@pytest.mark.parametrize(\"interp_mode\",\n                         _supported_modes)\ndef test_map_coordinates_out_of_bounds(interp_mode):\n    \"\"\"\n    This test checks that an error is raised when out-of-bounds\n    coordinates are used.\n    \"\"\"\n\n    kwargs = dict()\n    kwargs['random_seed'] = 0\n    
kwargs['interp_mode'] = interp_mode\n    kwargs['im_shape_per_dim'] = 10\n    kwargs['coord_offset'] = 10  # coordinates will be out of bounds\n\n    validate_map_coordinates_general(**kwargs)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndinterp/test_rotate.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport dask.array as da\nimport pytest\nfrom scipy import ndimage\n\nimport dask_image.ndinterp as da_ndinterp\n\n\ndef validate_rotate(n=2,\n                    axes=(0, 1),\n                    reshape=False,\n                    input_shape_per_dim=16,\n                    interp_order=1,\n                    interp_mode='constant',\n                    input_output_chunksize_per_dim=(6, 6),\n                    random_seed=0,\n                    use_cupy=False,\n                    ):\n    \"\"\"\n    Compare the outputs of `ndimage.rotate`\n    and `dask_image.ndinterp.rotate`.\n    \"\"\"\n\n    # define test image\n    np.random.seed(random_seed)\n    image = np.random.random([input_shape_per_dim] * n)\n\n    angle = np.random.random() * 360 - 180\n\n    # transform into dask array\n    chunksize = [input_output_chunksize_per_dim[0]] * n\n    image_da = da.from_array(image, chunks=chunksize)\n\n    if use_cupy:\n        import cupy as cp\n        image_da = image_da.map_blocks(cp.asarray)\n\n    # define resampling options\n    output_chunks = [input_output_chunksize_per_dim[1]] * n\n\n    # transform with dask-image\n    image_t_dask = da_ndinterp.rotate(\n        image, angle,\n        axes=axes,\n        reshape=reshape,\n        order=interp_order,\n        mode=interp_mode,\n        prefilter=False,\n        output_chunks=output_chunks\n        )\n\n    image_t_dask_computed = image_t_dask.compute()\n\n    # transform with scipy\n    image_t_scipy = ndimage.rotate(\n        image, angle,\n        axes=axes,\n        reshape=reshape,\n        order=interp_order,\n        mode=interp_mode,\n        prefilter=False)\n\n    assert np.allclose(image_t_scipy, image_t_dask_computed)\n\n\n@pytest.mark.parametrize(\"n\",\n                         [2, 3])\n@pytest.mark.parametrize(\"input_shape_per_dim\",\n                         [25, 
2])\n@pytest.mark.parametrize(\"interp_order\",\n                         [0, 1])\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n                         [(16, 16), (16, 7), (7, 16)])\n@pytest.mark.parametrize(\"random_seed\",\n                         [0, 1, 2])\ndef test_rotate_general(n,\n                        input_shape_per_dim,\n                        interp_order,\n                        input_output_chunksize_per_dim,\n                        random_seed):\n\n    kwargs = dict()\n    kwargs['n'] = n\n    kwargs['input_shape_per_dim'] = input_shape_per_dim\n    kwargs['interp_order'] = interp_order\n    kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n    kwargs['random_seed'] = random_seed\n\n    validate_rotate(**kwargs)\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"n\",\n                         [2, 3])\n@pytest.mark.parametrize(\"input_shape_per_dim\",\n                         [25, 2])\n@pytest.mark.parametrize(\"interp_order\",\n                         [0, 1])\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n                         [(16, 16), (16, 7)])\n@pytest.mark.parametrize(\"random_seed\",\n                         [0])\ndef test_rotate_cupy(n,\n                     input_shape_per_dim,\n                     interp_order,\n                     input_output_chunksize_per_dim,\n                     random_seed):\n\n    cupy = pytest.importorskip(\"cupy\", minversion=\"6.0.0\")    # noqa: F841\n\n    kwargs = dict()\n    kwargs['n'] = n\n    kwargs['input_shape_per_dim'] = input_shape_per_dim\n    kwargs['interp_order'] = interp_order\n    kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n    kwargs['random_seed'] = random_seed\n    kwargs['use_cupy'] = True\n\n    validate_rotate(**kwargs)\n\n\n@pytest.mark.parametrize(\"n\",\n                         [2, 3])\n@pytest.mark.parametrize(\"interp_mode\",\n                         ['constant', 
'nearest'])\n@pytest.mark.parametrize(\"input_shape_per_dim\",\n                         [20, 30])\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n                         [(15, 10)])\ndef test_rotate_modes(n,\n                      interp_mode,\n                      input_shape_per_dim,\n                      input_output_chunksize_per_dim,\n                      ):\n\n    kwargs = dict()\n    kwargs['n'] = n\n    kwargs['interp_mode'] = interp_mode\n    kwargs['input_shape_per_dim'] = input_shape_per_dim\n    kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n    kwargs['interp_order'] = 0\n\n    validate_rotate(**kwargs)\n\n\n@pytest.mark.parametrize(\"interp_mode\",\n                         ['wrap', 'reflect', 'mirror'])\ndef test_rotate_unsupported_modes(interp_mode):\n\n    kwargs = dict()\n    kwargs['interp_mode'] = interp_mode\n\n    with pytest.raises(NotImplementedError):\n        validate_rotate(**kwargs)\n\n\ndef test_rotate_dimensions():\n    with pytest.raises(ValueError):\n        validate_rotate(n=1)\n\n\n@pytest.mark.parametrize(\"axes\",\n                         [[1], [1, 2, 3],\n                          [-3, 0], [0, -3], [0, 3], [2, 0]])\ndef test_rotate_axisdimensions(axes):\n    kwargs = dict()\n    kwargs['axes'] = axes\n\n    with pytest.raises(ValueError):\n        validate_rotate(**kwargs)\n\n\n@pytest.mark.parametrize(\n    \"axes\",\n    [[1, 2.2], [1, 'a'], [[0, 1], 1], [(0, 1), 1], [0, {}]]\n)\ndef test_rotate_axistypes(axes):\n    kwargs = dict()\n    kwargs['axes'] = axes\n\n    with pytest.raises((ValueError, TypeError)):\n        validate_rotate(**kwargs)\n\n\n@pytest.mark.parametrize(\n    \"image\",\n    [\n        np.ones((3, 3)).astype(float),\n        np.ones((3, 3)).astype(int),\n        np.ones((3, 3)).astype(complex),\n    ]\n)\ndef test_rotate_dtype(image):\n    image_t = da_ndinterp.rotate(image, 0, reshape=False)\n    assert image_t.dtype == image.dtype\n\n\ndef 
test_rotate_numpy_input():\n    image = np.ones((3, 3))\n    image_t = da_ndinterp.rotate(image, 0, reshape=False)\n\n    assert image_t.shape == image.shape\n    assert (da.from_array(image) == image_t).min()\n\n\ndef test_rotate_minimal_input():\n    image = np.ones((3, 3))\n    image_t = da_ndinterp.rotate(np.ones((3, 3)), 0)\n\n    assert image_t.shape == image.shape\n\n\ndef test_rotate_type_consistency():\n    image = da.ones((3, 3))\n    image_t = da_ndinterp.rotate(image, 0)\n\n    assert isinstance(image, type(image_t))\n    assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute()))\n\n\n@pytest.mark.cupy\ndef test_rotate_type_consistency_gpu():\n    cupy = pytest.importorskip(\"cupy\", minversion=\"6.0.0\")\n\n    image = da.ones((3, 3))\n    image_t = da_ndinterp.rotate(image, 0)\n\n    image.map_blocks(cupy.asarray)\n\n    assert isinstance(image, type(image_t))\n    assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute()))\n\n\ndef test_rotate_no_chunks_specified():\n    image = da.ones((3, 3))\n    image_t = da_ndinterp.rotate(image, 0)\n\n    assert image_t.shape == image.shape\n    assert image_t.chunks == tuple([(s,) for s in image.shape])\n\n\ndef test_rotate_prefilter_not_implemented_error():\n    with pytest.raises(NotImplementedError):\n        da_ndinterp.rotate(\n            da.ones((15, 15)), 0,\n            order=3, prefilter=True, mode='nearest')\n"
  },
  {
    "path": "tests/test_dask_image/test_ndinterp/test_spline_filter.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom packaging import version\n\nimport dask\nimport dask.array as da\nimport numpy as np\nimport pytest\nimport scipy\nimport scipy.ndimage\n\nimport dask_image.ndinterp\n\n# mode lists for the case with prefilter = False\n_supported_modes = ['constant', 'nearest', 'reflect', 'mirror']\n_unsupported_modes = ['wrap']\n\n# additional modes are present in SciPy >= 1.6.0\nif version.parse(scipy.__version__) >= version.parse('1.6.0'):\n    _supported_modes += ['grid-constant', 'grid-mirror', 'grid-wrap']\n\n\ndef validate_spline_filter(n=2,\n                           axis_size=64,\n                           interp_order=3,\n                           interp_mode='constant',\n                           chunksize=32,\n                           output=np.float64,\n                           random_seed=0,\n                           use_cupy=False,\n                           axis=None,\n                           input_as_non_dask_array=False,\n                           depth=None):\n    \"\"\"\n    Compare the outputs of `scipy.ndimage.spline_transform`\n    and `dask_image.ndinterp.spline_transform`. 
If axis is not None, then\n    `spline_transform1d` is tested instead.\n\n    \"\"\"\n    if (\n        np.dtype(output) != np.float64\n        and version.parse(scipy.__version__) < version.parse('1.4.0')\n    ):\n        pytest.skip(\"bug in output dtype handling in SciPy < 1.4\")\n\n    # define test image\n    np.random.seed(random_seed)\n    image = np.random.random([axis_size] * n)\n\n    if version.parse(dask.__version__) < version.parse(\"2020.1.0\"):\n        # older dask will fail if any chunks have size smaller than depth\n        _depth = dask_image.ndinterp._get_default_depth(interp_order)\n        rem = axis_size % chunksize\n        if chunksize < _depth or (rem != 0 and rem < _depth):\n            pytest.skip(\"older dask doesn't automatically rechunk\")\n\n    if input_as_non_dask_array:\n        if use_cupy:\n            import cupy as cp\n            image_da = cp.asarray(image)\n        else:\n            image_da = image\n    else:\n        # transform into dask array\n        image_da = da.from_array(image, chunks=[chunksize] * n)\n        if use_cupy:\n            import cupy as cp\n            image_da = image_da.map_blocks(cp.asarray)\n\n    if axis is not None:\n        scipy_func = scipy.ndimage.spline_filter1d\n        dask_image_func = dask_image.ndinterp.spline_filter1d\n        kwargs = {'axis': axis}\n    else:\n        scipy_func = scipy.ndimage.spline_filter\n        dask_image_func = dask_image.ndinterp.spline_filter\n        kwargs = {}\n\n    # transform with scipy\n    image_t_scipy = scipy_func(\n        image,\n        output=output,\n        order=interp_order,\n        mode=interp_mode,\n        **kwargs)\n\n    # transform with dask-image\n    image_t_dask = dask_image_func(\n        image_da,\n        output=output,\n        order=interp_order,\n        mode=interp_mode,\n        depth=depth,\n        **kwargs)\n    image_t_dask_computed = image_t_dask.compute()\n\n    rtol = atol = 1e-6\n    out_dtype = 
np.dtype(output)\n    assert image_t_scipy.dtype == image_t_dask_computed.dtype == out_dtype\n    assert np.allclose(image_t_scipy, image_t_dask_computed,\n                       rtol=rtol, atol=atol)\n\n\n@pytest.mark.parametrize(\"n\", [1, 2, 3])\n@pytest.mark.parametrize(\"axis_size\", [64])\n@pytest.mark.parametrize(\"interp_order\", range(2, 6))\n@pytest.mark.parametrize(\"interp_mode\", _supported_modes)\n@pytest.mark.parametrize(\"chunksize\", [32, 15])\ndef test_spline_filter_general(\n    n,\n    axis_size,\n    interp_order,\n    interp_mode,\n    chunksize,\n):\n\n    validate_spline_filter(\n        n=n,\n        axis_size=axis_size,\n        interp_order=interp_order,\n        interp_mode=interp_mode,\n        chunksize=chunksize,\n        axis=None,\n    )\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"n\", [2])\n@pytest.mark.parametrize(\"axis_size\", [32])\n@pytest.mark.parametrize(\"interp_order\", range(2, 6))\n@pytest.mark.parametrize(\"interp_mode\", _supported_modes[::2])\n@pytest.mark.parametrize(\"chunksize\", [16])\n@pytest.mark.parametrize(\"axis\", [None, -1])\n@pytest.mark.parametrize(\"input_as_non_dask_array\", [False, True])\ndef test_spline_filter_cupy(\n    n,\n    axis_size,\n    interp_order,\n    interp_mode,\n    chunksize,\n    axis,\n    input_as_non_dask_array,\n):\n\n    pytest.importorskip(\"cupy\", minversion=\"9.0.0\")\n\n    validate_spline_filter(\n        n=n,\n        axis_size=axis_size,\n        interp_order=interp_order,\n        interp_mode=interp_mode,\n        chunksize=chunksize,\n        axis=axis,\n        input_as_non_dask_array=input_as_non_dask_array,\n        use_cupy=True,\n    )\n\n\n@pytest.mark.parametrize(\"n\", [1, 2, 3])\n@pytest.mark.parametrize(\"axis_size\", [48, 27])\n@pytest.mark.parametrize(\"interp_order\", range(2, 6))\n@pytest.mark.parametrize(\"interp_mode\", _supported_modes)\n@pytest.mark.parametrize(\"chunksize\", [33])\n@pytest.mark.parametrize(\"axis\", [0, 1, -1])\ndef 
test_spline_filter1d_general(\n    n,\n    axis_size,\n    interp_order,\n    interp_mode,\n    chunksize,\n    axis,\n):\n    if axis == 1 and n < 2:\n        pytest.skip(\"skip axis=1 for 1d signals\")\n\n    validate_spline_filter(\n        n=n,\n        axis_size=axis_size,\n        interp_order=interp_order,\n        interp_mode=interp_mode,\n        chunksize=chunksize,\n        axis=axis,\n    )\n\n\n@pytest.mark.parametrize(\"axis\", [None, -1])\ndef test_spline_filter_non_dask_array_input(axis):\n\n    validate_spline_filter(\n        axis=axis,\n        input_as_non_dask_array=True,\n    )\n\n\n@pytest.mark.parametrize(\"depth\", [None, 24])\n@pytest.mark.parametrize(\"axis\", [None, -1])\ndef test_spline_filter_non_default_depth(depth, axis):\n\n    validate_spline_filter(\n        axis=axis,\n        depth=depth,\n    )\n\n\n@pytest.mark.parametrize(\"depth\", [(16, 32), [18]])\ndef test_spline_filter1d_invalid_depth(depth):\n\n    with pytest.raises(ValueError):\n        validate_spline_filter(\n            axis=-1,\n            depth=depth,\n        )\n\n\n@pytest.mark.parametrize(\"axis_size\", [32])\n@pytest.mark.parametrize(\"interp_order\", range(2, 6))\n@pytest.mark.parametrize(\"interp_mode\", _unsupported_modes)\n@pytest.mark.parametrize(\"axis\", [None, -1])\ndef test_spline_filter_unsupported_modes(\n    axis_size,\n    interp_order,\n    interp_mode,\n    axis,\n):\n\n    with pytest.raises(NotImplementedError):\n        validate_spline_filter(\n            axis_size=axis_size,\n            interp_order=interp_order,\n            interp_mode=interp_mode,\n            axis=axis,\n        )\n\n\n@pytest.mark.parametrize(\n    \"output\", [np.float64, np.float32, \"float32\", np.dtype(np.float32)]\n)\n@pytest.mark.parametrize(\"axis\", [None, -1])\ndef test_spline_filter_output_dtype(output, axis):\n\n    validate_spline_filter(\n        axis_size=32,\n        interp_order=3,\n        output=output,\n        axis=axis,\n    
)\n\n\n@pytest.mark.parametrize(\"axis\", [None, -1])\ndef test_spline_filter_array_output_unsupported(axis):\n\n    n = 2\n    axis_size = 32\n    shape = (axis_size,) * n\n\n    with pytest.raises(TypeError):\n        validate_spline_filter(\n            n=n,\n            axis_size=axis_size,\n            interp_order=3,\n            output=np.empty(shape),\n            axis=axis,\n        )\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmeasure/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmeasure/test__utils.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport dask.array as da\n\nimport dask_image.ndmeasure._utils\n\n\ndef test__norm_input_labels_index_err():\n    shape = (15, 16)\n    chunks = (4, 5)\n    ind = None\n\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = (a < 0.5).astype(np.int64)\n    d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    lbls = lbls[:-1]\n    d_lbls = d_lbls[:-1]\n\n    with pytest.raises(ValueError):\n        dask_image.ndmeasure._utils._norm_input_labels_index(d, d_lbls, ind)\n\n\ndef test__norm_input_labels_index():\n    shape = (15, 16)\n    chunks = (4, 5)\n    ind = None\n\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = (a < 0.5).astype(int)\n    d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    d_n, d_lbls_n, ind_n = dask_image.ndmeasure._utils._norm_input_labels_index(  # noqa: E501\n        d, d_lbls, ind\n    )\n\n    assert isinstance(d_n, da.Array)\n    assert isinstance(d_lbls_n, da.Array)\n    assert isinstance(ind_n, da.Array)\n\n    assert d_n.shape == d.shape\n    assert d_lbls_n.shape == d_lbls.shape\n    assert ind_n.shape == ()\n\n    da.utils.assert_eq(d_n, d)\n    da.utils.assert_eq(d_lbls_n, d_lbls)\n    da.utils.assert_eq(ind_n, np.array(1, dtype=int))\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks, ind\", [\n        ((15, 16), (4, 5), [[1, 2, 3, 4]]),\n        ((15, 16), (4, 5), [[1, 2], [3, 4]]),\n        ((15, 16), (4, 5), [[[1], [2], [3], [4]]]),\n    ]\n)\ndef test__norm_input_labels_index_warn(shape, chunks, ind):\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = np.zeros(a.shape, dtype=np.int64)\n    lbls += (\n        (a < 0.5).astype(lbls.dtype) +\n        (a < 0.25).astype(lbls.dtype) +\n        (a < 0.125).astype(lbls.dtype) +\n        (a < 0.0625).astype(lbls.dtype)\n    )\n    d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    
ind = np.array(ind)\n    d_ind = da.from_array(ind, chunks=1)\n\n    with pytest.warns(FutureWarning) as w:\n        dask_image.ndmeasure._utils._norm_input_labels_index(\n            d, d_lbls, d_ind\n        )\n\n    if ind.ndim > 1:\n        assert len(w) == 1\n        w.pop(FutureWarning)\n    else:\n        assert len(w) == 0\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks\", [\n        ((15,), (4,)),\n        ((15, 16), (4, 5)),\n        ((15, 1, 16), (4, 1, 5)),\n        ((15, 12, 16), (4, 5, 6)),\n    ]\n)\ndef test___ravel_shape_indices(shape, chunks):\n    a = np.arange(int(np.prod(shape)), dtype=np.int64).reshape(shape)\n    d = dask_image.ndmeasure._utils._ravel_shape_indices(\n        shape, dtype=np.int64, chunks=chunks\n    )\n\n    da.utils.assert_eq(d, a)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmeasure/test_core.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport itertools as it\nimport warnings as wrn\n\nimport pytest\nimport numpy as np\nimport scipy\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndmeasure\n\n\n@pytest.mark.parametrize(\n    \"funcname\", [\n        \"center_of_mass\",\n        \"extrema\",\n        \"maximum\",\n        \"maximum_position\",\n        \"mean\",\n        \"median\",\n        \"minimum\",\n        \"minimum_position\",\n        \"standard_deviation\",\n        \"sum_labels\",\n        \"variance\",\n    ]\n)\ndef test_measure_props_err(funcname):\n    da_func = getattr(dask_image.ndmeasure, funcname)\n\n    shape = (15, 16)\n    chunks = (4, 5)\n    ind = None\n\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = (a < 0.5).astype(np.int64)\n    d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    lbls = lbls[:-1]\n    d_lbls = d_lbls[:-1]\n\n    with pytest.raises(ValueError):\n        da_func(d, lbls, ind)\n\n\n@pytest.mark.parametrize(\n    \"datatype\", [\n        int,\n        float,\n        np.bool_,\n        np.uint8,\n        np.uint16,\n        np.uint32,\n        np.uint64,\n        np.int16,\n        np.int32,\n        np.int64,\n        np.float32,\n        np.float64,\n    ]\n)\ndef test_center_of_mass(datatype):\n    a = np.array([[1, 1], [0, 0]]).astype(datatype)\n    d = da.from_array(a, chunks=(1, 2))\n\n    actual = dask_image.ndmeasure.center_of_mass(d).compute()\n    expected = [0., 0.5]\n\n    assert np.allclose(actual, expected)\n\n\n@pytest.mark.parametrize(\n    \"funcname\", [\n        \"center_of_mass\",\n        \"maximum\",\n        \"maximum_position\",\n        \"mean\",\n        \"median\",\n        \"minimum\",\n        \"minimum_position\",\n        \"standard_deviation\",\n        \"sum_labels\",\n        \"variance\",\n    ]\n)\n@pytest.mark.parametrize(\n    \"shape, chunks, has_lbls, ind\", [\n        ((5, 6, 4), (2, 3, 2), 
False, None),\n        ((15, 16), (4, 5), False, None),\n        ((15, 16), (4, 5), True, None),\n        ((15, 16), (4, 5), True, 0),\n        ((15, 16), (4, 5), True, 1),\n        ((15, 16), (4, 5), True, [1]),\n        ((15, 16), (4, 5), True, [1, 2]),\n        ((5, 6, 4), (2, 3, 2), True, [1, 2]),\n        ((15, 16), (4, 5), True, [1, 100]),\n        ((5, 6, 4), (2, 3, 2), True, [1, 100]),\n        ((15, 16), (4, 5), True, [[1, 2, 3, 4]]),\n        ((15, 16), (4, 5), True, [[1, 2], [3, 4]]),\n        ((15, 16), (4, 5), True, [[[1], [2], [3], [4]]]),\n    ]\n)\ndef test_measure_props(funcname, shape, chunks, has_lbls, ind):\n    sp_func = getattr(scipy.ndimage, funcname)\n    da_func = getattr(dask_image.ndmeasure, funcname)\n\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = None\n    d_lbls = None\n\n    if has_lbls:\n        lbls = np.zeros(a.shape, dtype=np.int64)\n        lbls += (\n            (a < 0.5).astype(lbls.dtype) +\n            (a < 0.25).astype(lbls.dtype) +\n            (a < 0.125).astype(lbls.dtype) +\n            (a < 0.0625).astype(lbls.dtype)\n        )\n        d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    a_r = np.array(sp_func(a, lbls, ind))\n    d_r = da_func(d, d_lbls, ind)\n\n    if a_r.dtype != d_r.dtype:\n        wrn.warn(\n            \"Encountered a type mismatch.\"\n            \" Expected type, %s, but got type, %s.\"\n            \"\" % (str(a_r.dtype), str(d_r.dtype)),\n            RuntimeWarning\n        )\n    assert a_r.shape == d_r.shape\n\n    # See the linked issue for details.\n    # ref: https://github.com/scipy/scipy/issues/7706\n    if (\n        funcname == \"median\" and\n        ind is not None and\n        not np.isin(np.atleast_1d(ind), lbls).all()\n    ):\n        pytest.skip(\"SciPy's `median` mishandles missing labels.\")\n\n    assert np.allclose(np.array(a_r), np.array(d_r), equal_nan=True)\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks, has_lbls, ind\", [\n  
      ((15, 16), (4, 5), False, None),\n        ((5, 6, 4), (2, 3, 2), False, None),\n        ((15, 16), (4, 5), True, None),\n        ((15, 16), (4, 5), True, 0),\n        ((15, 16), (4, 5), True, 1),\n        ((15, 16), (4, 5), True, [1]),\n        ((15, 16), (4, 5), True, [1, 2]),\n        ((5, 6, 4), (2, 3, 2), True, [1, 2]),\n        ((15, 16), (4, 5), True, [1, 100]),\n        ((5, 6, 4), (2, 3, 2), True, [1, 100]),\n        ((15, 16), (4, 5), True, [[1, 2, 3, 4]]),\n        ((15, 16), (4, 5), True, [[1, 2], [3, 4]]),\n        ((15, 16), (4, 5), True, [[[1], [2], [3], [4]]]),\n    ]\n)\ndef test_area(shape, chunks, has_lbls, ind):\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = None\n    d_lbls = None\n\n    if has_lbls:\n        lbls = np.zeros(a.shape, dtype=np.int64)\n        lbls += (\n            (a < 0.5).astype(lbls.dtype) +\n            (a < 0.25).astype(lbls.dtype) +\n            (a < 0.125).astype(lbls.dtype) +\n            (a < 0.0625).astype(lbls.dtype)\n        )\n        d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    a_r = None\n    if has_lbls:\n        if ind is None:\n            a_r = lbls.astype(bool).astype(np.int64).sum()\n        else:\n            a_r = np.bincount(\n                lbls.flatten(),\n                minlength=(1 + max(np.array(ind).flatten()))\n            )\n            a_r = a_r[np.asarray(ind)]\n    else:\n        a_r = np.array(a.size)[()]\n\n    d_r = dask_image.ndmeasure.area(d, d_lbls, ind)\n\n    assert np.allclose(np.array(a_r), np.array(d_r), equal_nan=True)\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks, has_lbls, ind\", [\n        ((15, 16), (4, 5), False, None),\n        ((5, 6, 4), (2, 3, 2), False, None),\n        ((15, 16), (4, 5), True, None),\n        ((15, 16), (4, 5), True, 0),\n        ((15, 16), (4, 5), True, 1),\n        ((15, 16), (4, 5), True, [1]),\n        ((15, 16), (4, 5), True, [1, 2]),\n        ((5, 6, 4), (2, 3, 2), True, [1, 2]),\n        
((15, 16), (4, 5), True, [1, 100]),\n        ((5, 6, 4), (2, 3, 2), True, [1, 100]),\n        ((15, 16), (4, 5), True, [[1, 2, 3, 4]]),\n        ((15, 16), (4, 5), True, [[1, 2], [3, 4]]),\n        ((15, 16), (4, 5), True, [[[1], [2], [3], [4]]]),\n    ]\n)\ndef test_extrema(shape, chunks, has_lbls, ind):\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = None\n    d_lbls = None\n\n    if has_lbls:\n        lbls = np.zeros(a.shape, dtype=np.int64)\n        lbls += (\n            (a < 0.5).astype(lbls.dtype) +\n            (a < 0.25).astype(lbls.dtype) +\n            (a < 0.125).astype(lbls.dtype) +\n            (a < 0.0625).astype(lbls.dtype)\n        )\n        d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    a_r = scipy.ndimage.extrema(a, lbls, ind)\n    d_r = dask_image.ndmeasure.extrema(d, d_lbls, ind)\n\n    assert len(a_r) == len(d_r)\n\n    for i in range(len(a_r)):\n        a_r_i = np.array(a_r[i])\n        if a_r_i.dtype != d_r[i].dtype:\n            wrn.warn(\n                \"Encountered a type mismatch.\"\n                \" Expected type, %s, but got type, %s.\"\n                \"\" % (str(a_r_i.dtype), str(d_r[i].dtype)),\n                RuntimeWarning\n            )\n        assert a_r_i.shape == d_r[i].shape\n        assert np.allclose(a_r_i, np.array(d_r[i]), equal_nan=True)\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks, has_lbls, ind\", [\n        ((15, 16), (4, 5), False, None),\n        ((5, 6, 4), (2, 3, 2), False, None),\n        ((15, 16), (4, 5), True, None),\n        ((15, 16), (4, 5), True, 0),\n        ((15, 16), (4, 5), True, 1),\n        ((15, 16), (4, 5), True, 100),\n        ((15, 16), (4, 5), True, [1]),\n        ((15, 16), (4, 5), True, [1, 2]),\n        ((5, 6, 4), (2, 3, 2), True, [1, 2]),\n        ((15, 16), (4, 5), True, [1, 100]),\n        ((5, 6, 4), (2, 3, 2), True, [1, 100]),\n    ]\n)\n@pytest.mark.parametrize(\n    \"min, max, bins\", [\n        (0, 1, 5),\n    ]\n)\ndef 
test_histogram(shape, chunks, has_lbls, ind, min, max, bins):\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = None\n    d_lbls = None\n\n    if has_lbls:\n        lbls = np.zeros(a.shape, dtype=np.int64)\n        lbls += (\n            (a < 0.5).astype(lbls.dtype) +\n            (a < 0.25).astype(lbls.dtype) +\n            (a < 0.125).astype(lbls.dtype) +\n            (a < 0.0625).astype(lbls.dtype)\n        )\n        d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    a_r = scipy.ndimage.histogram(a, min, max, bins, lbls, ind)\n    d_r = dask_image.ndmeasure.histogram(d, min, max, bins, d_lbls, ind)\n\n    if ind is None or np.isscalar(ind):\n        if a_r is None:\n            assert d_r.compute() is None\n        else:\n            np.allclose(a_r, d_r.compute(), equal_nan=True)\n    else:\n        assert a_r.dtype == d_r.dtype\n        assert a_r.shape == d_r.shape\n        for i in it.product(*[range(_) for _ in a_r.shape]):\n            if a_r[i] is None:\n                assert d_r[i].compute() is None\n            else:\n                assert np.allclose(a_r[i], d_r[i].compute(), equal_nan=True)\n\n\ndef _assert_equivalent_labeling(labels0, labels1):\n    \"\"\"Make sure the two label arrays are equivalent.\n\n    In the sense that if two pixels have the same label in labels0, they will\n    also have the same label in labels1, and vice-versa.\n\n    We check this by verifying that there is exactly a one-to-one mapping\n    between the two label volumes.\n    \"\"\"\n    matching = np.stack((labels0.ravel(), labels1.ravel()), axis=1)\n    unique_matching = dask_image.ndmeasure._label._unique_axis(matching)\n    bincount0 = np.bincount(unique_matching[:, 0])\n    bincount1 = np.bincount(unique_matching[:, 1])\n    assert np.all(bincount0 == 1)\n    assert np.all(bincount1 == 1)\n\n\n@pytest.mark.parametrize(\n    \"seed, prob, shape, chunks, connectivity\", [\n        (42, 0.4, (15, 16), (15, 16), 1),\n        (42, 
0.4, (15, 16), (4, 5), 1),\n        (42, 0.4, (15, 16), (4, 5), 2),\n        (42, 0.4, (15, 16), (4, 5), None),\n        (42, 0.4, (15, 16), (8, 5), 1),\n        (42, 0.4, (15, 16), (8, 5), 2),\n        (42, 0.3, (10, 8, 6), (5, 4, 3), 1),\n        (42, 0.3, (10, 8, 6), (5, 4, 3), 2),\n        (42, 0.3, (10, 8, 6), (5, 4, 3), 3),\n    ]\n)\ndef test_label(seed, prob, shape, chunks, connectivity):\n    np.random.seed(seed)\n\n    a = np.random.random(shape) < prob\n    d = da.from_array(a, chunks=chunks)\n\n    if connectivity is None:\n        s = None\n    else:\n        s = scipy.ndimage.generate_binary_structure(a.ndim, connectivity)\n\n    a_l, a_nl = scipy.ndimage.label(a, s)\n    d_l, d_nl = dask_image.ndmeasure.label(d, s)\n\n    assert a_nl == d_nl.compute()\n\n    assert a_l.dtype == d_l.dtype\n    assert a_l.shape == d_l.shape\n    _assert_equivalent_labeling(a_l, d_l.compute())\n\n\na = np.array(\n    [\n        [0, 0, 1, 0, 0, 1, 1, 0, 0, 0],\n        [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],\n        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],\n        [1, 0, 0, 0, 1, 0, 1, 1, 1, 0],\n        [0, 1, 0, 0, 1, 0, 1, 1, 1, 0],\n        [0, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n        [0, 0, 1, 0, 0, 1, 1, 0, 0, 0],\n    ]\n)\n\n\n@pytest.mark.parametrize(\n    \"a, a_res, wrap_axes, connectivity, chunks\",\n    [\n        pytest.param(\n            a,\n            np.array(\n                [\n                    [0, 0, 1, 0, 0, 3, 3, 0, 0, 0],\n                    [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n                    [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],\n                    [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],\n                    [1, 0, 0, 0, 2, 0, 1, 1, 1, 0],\n                    [0, 1, 0, 0, 2, 0, 1, 1, 1, 0],\n                    [0, 0, 1, 0, 2, 0, 0, 0, 0, 0],\n                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n   
                 [0, 0, 4, 0, 0, 5, 5, 0, 0, 0],\n                ]\n            ),\n            (1,),\n            2,\n            (5, 5),\n            id=\"2d, wrapping 1st axis.\",\n        ),\n        pytest.param(\n            a,\n            np.array(\n                [\n                    [0, 0, 1, 0, 0, 3, 3, 0, 0, 0],\n                    [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n                    [1, 1, 0, 0, 0, 0, 4, 4, 4, 4],\n                    [1, 1, 0, 0, 0, 0, 4, 4, 4, 4],\n                    [1, 0, 0, 0, 2, 0, 4, 4, 4, 0],\n                    [0, 1, 0, 0, 2, 0, 4, 4, 4, 0],\n                    [0, 0, 1, 0, 2, 0, 0, 0, 0, 0],\n                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                    [0, 0, 1, 0, 0, 3, 3, 0, 0, 0],\n                ]\n            ),\n            (0,),\n            2,\n            (5, 5),\n            id=\"2d, wrapping 0th axes.\",\n        ),\n        pytest.param(\n            a,\n            np.array(\n                [\n                    [0, 0, 1, 0, 0, 3, 3, 0, 0, 0],\n                    [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n                    [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],\n                    [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],\n                    [1, 0, 0, 0, 2, 0, 1, 1, 1, 0],\n                    [0, 1, 0, 0, 2, 0, 1, 1, 1, 0],\n                    [0, 0, 1, 0, 2, 0, 0, 0, 0, 0],\n                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                    [0, 0, 1, 0, 0, 3, 3, 0, 0, 0],\n                ]\n            ),\n            (0, 1),\n            2,\n            (5, 5),\n            id=\"2d, wrapping both axes\",\n        ),\n        pytest.param(\n            np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]),\n            np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]),\n            (0, 1),\n            2,\n            \"auto\",\n            id=\"2d, full wrap, high connectivity 
(corners).\",\n        ),\n        pytest.param(\n            np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]),\n            # Corners should not be connected for lower connectivity.\n            np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 2]]),\n            (0, 1),\n            1,\n            \"auto\",\n            id=\"2d, full wrap, low connectivity (no corners).\",\n        ),\n        # 3d\n        pytest.param(\n            np.array(\n                [\n                    [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 0, 0]],\n                    [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],\n                    [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]],\n                ]\n            ),\n            np.array(\n                [\n                    [[0, 0, 0, 0, 0], [1, 0, 0, 0, 2], [0, 0, 0, 0, 0]],\n                    [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],\n                    [[0, 0, 0, 0, 0], [3, 0, 0, 0, 4], [3, 0, 0, 0, 4]],\n                ]\n            ),\n            None,\n            3,\n            \"auto\",\n            id=\"3d no wrap\",\n        ),\n        pytest.param(\n            np.array(\n                [\n                    [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 0, 0]],\n                    [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],\n                    [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]],\n                ]\n            ),\n            np.array(\n                [\n                    [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 0, 0]],\n                    [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],\n                    [[0, 0, 0, 0, 0], [2, 0, 0, 0, 2], [2, 0, 0, 0, 2]],\n                ]\n            ),\n            (2,),\n            3,\n            \"auto\",\n            id=\"3d wrap 2nd axis\",\n        ),\n        pytest.param(\n            np.array(\n                [\n                    [\n                        [0, 0, 0, 
0, 1],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 0],\n                    ],\n                    [\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 1],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                    ],\n                    [\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 1],\n                    ],\n                ]\n            ),\n            np.array(\n                [\n                    [\n                        [0, 0, 0, 0, 1],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 0],\n                    ],\n                    [\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [2, 0, 0, 0, 2],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                    ],\n                    [\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 
0],\n                        [3, 0, 0, 0, 3],\n                    ],\n                ]\n            ),\n            (1, 2),\n            3,\n            \"auto\",\n            id=\"3d, wrap 1st and 2nd axis, with corners\",\n        ),\n        pytest.param(\n            np.array(\n                [\n                    [\n                        [0, 0, 0, 0, 1],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 0],\n                    ],\n                    [\n                        [0, 0, 0, 0, 1],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 1],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                    ],\n                    [\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 1],\n                    ],\n                ]\n            ),\n            np.array(\n                [\n                    [\n                        [0, 0, 0, 0, 1],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 0],\n                    ],\n                    [\n                        [0, 0, 0, 0, 1],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [2, 0, 0, 0, 2],\n                        [0, 0, 0, 0, 0],\n                      
  [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                    ],\n                    [\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [0, 0, 0, 0, 0],\n                        [1, 0, 0, 0, 1],\n                    ],\n                ]\n            ),\n            (1, 2),\n            3,\n            \"auto\",\n            id=\"3d, with corners, connection through adjacent timesteps.\",\n        ),\n    ],\n)\ndef test_label_wrap(a, a_res, wrap_axes, connectivity, chunks):\n    d = da.from_array(a, chunks=chunks)\n\n    s = scipy.ndimage.generate_binary_structure(a.ndim, connectivity)\n\n    d_l, _ = dask_image.ndmeasure.label(d, s, wrap_axes=wrap_axes)\n\n    _assert_equivalent_labeling(a_res, d_l.compute())\n\n\n@pytest.mark.parametrize(\n    \"ndim\", (2, 3, 4, 5)\n)\ndef test_label_full_struct_element(ndim):\n\n    full_s = scipy.ndimage.generate_binary_structure(ndim, ndim)\n    orth_s = scipy.ndimage.generate_binary_structure(ndim, ndim - 1)\n\n    # create a mask that represents a single connected component\n    # under the full (highest rank) structuring element\n    # but several connected components under the orthogonal\n    # structuring element\n    mask = full_s ^ orth_s\n    mask[tuple([1] * ndim)] = True\n\n    # create dask array with chunk boundary\n    # that passes through the mask\n    mask_da = da.from_array(mask, chunks=[2] * ndim)\n\n    labels_ndi, N_ndi = scipy.ndimage.label(mask, structure=full_s)\n    labels_di_da, N_di_da = dask_image.ndmeasure.label(\n        mask_da, structure=full_s)\n\n    assert N_ndi == N_di_da.compute()\n\n    _assert_equivalent_labeling(\n        labels_ndi,\n        labels_di_da.compute())\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks, ind\", [\n        ((15, 16), (4, 5), None),\n        ((5, 6, 4), 
(2, 3, 2), None),\n        ((15, 16), (4, 5), 0),\n        ((15, 16), (4, 5), 1),\n        ((15, 16), (4, 5), [1]),\n        ((15, 16), (4, 5), [1, 2]),\n        ((5, 6, 4), (2, 3, 2), [1, 2]),\n        ((15, 16), (4, 5), [1, 100]),\n        ((5, 6, 4), (2, 3, 2), [1, 100]),\n    ]\n)\n@pytest.mark.parametrize(\n    \"default\", [\n        None,\n        0,\n        1.5,\n    ]\n)\n@pytest.mark.parametrize(\n    \"pass_positions\", [\n        False,\n        True,\n    ]\n)\ndef test_labeled_comprehension(shape, chunks, ind, default, pass_positions):\n    a = np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = np.zeros(a.shape, dtype=np.int64)\n    lbls += (\n        (a < 0.5).astype(lbls.dtype) +\n        (a < 0.25).astype(lbls.dtype) +\n        (a < 0.125).astype(lbls.dtype) +\n        (a < 0.0625).astype(lbls.dtype)\n    )\n    d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    def func(val, pos=None):\n        if pos is None:\n            pos = 0 * val + 1\n\n        return (val * pos).sum() / (1 + val.max() * pos.max())\n\n    a_cm = scipy.ndimage.labeled_comprehension(\n        a, lbls, ind, func, np.float64, default, pass_positions\n    )\n    d_cm = dask_image.ndmeasure.labeled_comprehension(\n        d, d_lbls, ind, func, np.float64, default, pass_positions\n    )\n\n    assert a_cm.dtype == d_cm.dtype\n    assert a_cm.shape == d_cm.shape\n    assert np.allclose(np.array(a_cm), np.array(d_cm), equal_nan=True)\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks, ind\", [\n        ((15, 16), (4, 5), None),\n        ((5, 6, 4), (2, 3, 2), None),\n        ((15, 16), (4, 5), 0),\n        ((15, 16), (4, 5), 1),\n        ((15, 16), (4, 5), [1]),\n        ((15, 16), (4, 5), [1, 2]),\n        ((5, 6, 4), (2, 3, 2), [1, 2]),\n        ((15, 16), (4, 5), [1, 100]),\n        ((5, 6, 4), (2, 3, 2), [1, 100]),\n    ]\n)\ndef test_labeled_comprehension_struct(shape, chunks, ind):\n    a = np.random.random(shape)\n    d = da.from_array(a, 
chunks=chunks)\n\n    lbls = np.zeros(a.shape, dtype=np.int64)\n    lbls += (\n        (a < 0.5).astype(lbls.dtype) +\n        (a < 0.25).astype(lbls.dtype) +\n        (a < 0.125).astype(lbls.dtype) +\n        (a < 0.0625).astype(lbls.dtype)\n    )\n    d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    dtype = np.dtype([(\"val\", np.float64), (\"pos\", int)])\n    default = np.array((np.nan, -1), dtype=dtype)\n\n    def func_max(val):\n        return np.max(val)\n\n    def func_argmax(val, pos):\n        return pos[np.argmax(val)]\n\n    def func_max_argmax(val, pos):\n        result = np.empty((), dtype=dtype)\n\n        i = np.argmax(val)\n\n        result[\"val\"] = val[i]\n        result[\"pos\"] = pos[i]\n\n        return result[()]\n\n    a_max = scipy.ndimage.labeled_comprehension(\n        a, lbls, ind, func_max, dtype[\"val\"], default[\"val\"], False\n    )\n    a_argmax = scipy.ndimage.labeled_comprehension(\n        a, lbls, ind, func_argmax, dtype[\"pos\"], default[\"pos\"], True\n    )\n\n    d_max_argmax = dask_image.ndmeasure.labeled_comprehension(\n        d, d_lbls, ind, func_max_argmax, dtype, default, True\n    )\n    d_max = d_max_argmax[\"val\"]\n    d_argmax = d_max_argmax[\"pos\"]\n\n    assert dtype == d_max_argmax.dtype\n\n    for e_a_r, e_d_r in zip([a_max, a_argmax], [d_max, d_argmax]):\n        assert e_a_r.dtype == e_d_r.dtype\n        assert e_a_r.shape == e_d_r.shape\n        assert np.allclose(np.array(e_a_r), np.array(e_d_r), equal_nan=True)\n\n\n@pytest.mark.parametrize(\n    \"shape, chunks, ind\", [\n        ((15, 16), (4, 5), None),\n        ((5, 6, 4), (2, 3, 2), None),\n        ((15, 16), (4, 5), 0),\n        ((15, 16), (4, 5), 1),\n        ((15, 16), (4, 5), [1]),\n        ((15, 16), (4, 5), [1, 2]),\n        ((5, 6, 4), (2, 3, 2), [1, 2]),\n        ((15, 16), (4, 5), [1, 100]),\n        ((5, 6, 4), (2, 3, 2), [1, 100]),\n    ]\n)\ndef test_labeled_comprehension_object(shape, chunks, ind):\n    a = 
np.random.random(shape)\n    d = da.from_array(a, chunks=chunks)\n\n    lbls = np.zeros(a.shape, dtype=np.int64)\n    lbls += (\n        (a < 0.5).astype(lbls.dtype) +\n        (a < 0.25).astype(lbls.dtype) +\n        (a < 0.125).astype(lbls.dtype) +\n        (a < 0.0625).astype(lbls.dtype)\n    )\n    d_lbls = da.from_array(lbls, chunks=d.chunks)\n\n    dtype = np.dtype(object)\n\n    default = None\n\n    def func_min_max(val):\n        return np.array([np.min(val), np.max(val)])\n\n    a_r = scipy.ndimage.labeled_comprehension(\n        a, lbls, ind, func_min_max, dtype, default, False\n    )\n\n    d_r = dask_image.ndmeasure.labeled_comprehension(\n        d, d_lbls, ind, func_min_max, dtype, default, False\n    )\n\n    if ind is None or np.isscalar(ind):\n        if a_r is None:\n            assert d_r.compute() is None\n        else:\n            np.allclose(a_r, d_r.compute(), equal_nan=True)\n    else:\n        assert a_r.dtype == d_r.dtype\n        assert a_r.shape == d_r.shape\n        for i in it.product(*[range(_) for _ in a_r.shape]):\n            if a_r[i] is None:\n                assert d_r[i].compute() is None\n            else:\n                assert np.allclose(a_r[i], d_r[i].compute(), equal_nan=True)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmeasure/test_find_objects.py",
    "content": "import pytest\n\npd = pytest.importorskip(\"pandas\")\ndd = pytest.importorskip(\"dask.dataframe\")\n\nimport dask.array as da  # noqa: E402\nimport numpy as np  # noqa: E402\n\nimport dask_image.ndmeasure  # noqa: E402\n\n\n@pytest.fixture\ndef label_image():\n    \"\"\"Return small label image for tests.\n\n    dask.array<array, shape=(5, 10), dtype=int64, chunksize=(5, 5), chunktype=numpy.ndarray>\n\n    array([[   0,   0,   0,   0,   0,   0,   0, 333, 333, 333],\n            [111, 111,   0,   0,   0,   0,   0, 333, 333, 333],\n            [111, 111,   0,   0,   0,   0,   0,   0,   0,   0],\n            [  0,   0,   0, 222, 222, 222, 222, 222, 222,   0],\n            [  0,   0,   0,   0,   0,   0,   0,   0,   0,   0]])\n\n    \"\"\"  # noqa: E501\n    label_image = np.zeros((5, 10)).astype(int)\n    label_image[1:3, 0:2] = 111\n    label_image[3, 3:-2] = 222\n    label_image[0:2, -3:] = 333\n    label_image = da.from_array(label_image, chunks=(5, 5))\n    return label_image\n\n\n@pytest.fixture\ndef label_image_with_empty_chunk():\n    \"\"\"Return small label image with an empty chunk for tests.\n\n    dask.array<array, shape=(6, 6), dtype=int64, chunksize=(3, 3), chunktype=numpy.ndarray>\n\n    array([[   0,   0,   0,   0,   0,   0],\n            [111, 111,   0,   0,   0,   0],\n            [111, 111,   0,   0,   0,   0],\n            [  0,   0,   0,   0,   0,   0],\n            [  0,   0,   0, 222, 222, 222],\n            [  0,   0,   0,   0,   0,   0]])\n    \"\"\"  # noqa: E501\n    label_image = np.zeros((6, 6)).astype(int)\n    label_image[1:3, 0:2] = 111\n    label_image[4, 3:] = 222\n    label_image = da.from_array(label_image, chunks=(3, 3))\n    return label_image\n\n\ndef test_find_objects_err(label_image):\n    label_image = label_image.astype(float)\n    with pytest.raises(ValueError):\n        dask_image.ndmeasure.find_objects(label_image)\n\n\ndef test_empty_chunk():\n    test_labels = da.zeros((10, 10), dtype='int', chunks=(3, 
3))\n    test_labels[0, 0] = 1\n    computed_result = dask_image.ndmeasure.find_objects(test_labels).compute()\n    expected = pd.DataFrame.from_dict({0: {1: slice(0, 1)},\n                                       1: {1: slice(0, 1)}, })\n    assert computed_result.equals(expected)\n\n\ndef test_find_objects(label_image):\n    result = dask_image.ndmeasure.find_objects(label_image)\n    assert isinstance(result, dd.DataFrame)\n    computed_result = result.compute()\n    assert isinstance(computed_result, pd.DataFrame)\n    expected = pd.DataFrame.from_dict({\n        0: {111: slice(1, 3), 222: slice(3, 4), 333: slice(0, 2)},\n        1: {111: slice(0, 2), 222: slice(3, 8), 333: slice(7, 10)},\n    })\n    assert computed_result.equals(expected)\n\n\ndef test_3d_find_objects(label_image):\n    label_image = da.stack([label_image, label_image], axis=2)\n    result = dask_image.ndmeasure.find_objects(label_image)\n    assert isinstance(result, dd.DataFrame)\n    computed_result = result.compute()\n    assert isinstance(computed_result, pd.DataFrame)\n    expected = pd.DataFrame.from_dict({\n        0: {111: slice(1, 3), 222: slice(3, 4), 333: slice(0, 2)},\n        1: {111: slice(0, 2), 222: slice(3, 8), 333: slice(7, 10)},\n        2: {111: slice(0, 2), 222: slice(0, 2), 333: slice(0, 2)},\n    })\n    assert computed_result.equals(expected)\n\n\ndef test_find_objects_with_empty_chunks(label_image_with_empty_chunk):\n    result = dask_image.ndmeasure.find_objects(label_image_with_empty_chunk)\n    assert isinstance(result, dd.DataFrame)\n    computed_result = result.compute()\n    assert isinstance(computed_result, pd.DataFrame)\n    expected = pd.DataFrame.from_dict({\n        0: {111: slice(1, 3, None), 222: slice(4, 5, None)},\n        1: {111: slice(0, 2, None), 222: slice(3, 6, None)},\n    })\n    assert computed_result.equals(expected)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmeasure/test_find_objects_no_dataframe.py",
    "content": "\"\"\"\nTest that ``find_objects`` raises a helpful ``ImportError`` when the\noptional ``dask[dataframe]`` / ``pandas`` dependencies are not installed.\n\nThis is skipped if both dependencies are installed.\n\n\"\"\"\nimport dask.array as da\nimport pytest\n\nimport dask_image.ndmeasure\n\n\ntry:\n    import pandas  # noqa: F401\n    import dask.dataframe  # noqa: F401\n    dataframe_available = True\nexcept ImportError:\n    dataframe_available = False\n\n\n@pytest.mark.skipif(\n    dataframe_available,\n    reason=\"dataframe dependencies are installed; \"\n           \"ImportError path only triggers without them\",\n)\ndef test_find_objects_raises_import_error_without_pandas():\n    label_image = da.zeros((3, 3), dtype=int, chunks=(3, 3))\n    with pytest.raises(\n        ImportError,\n        match=(\n            r\"dask_image\\.ndmeasure\\.find_objects requires the optional \"\n            r\"dependencies `dask\\[dataframe\\]` and `pandas`\\. \"\n            r\"Install them with `pip install dask-image\\[dataframe\\]`\\.\"\n        ),\n    ):\n        dask_image.ndmeasure.find_objects(label_image)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmorph/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmorph/test__utils.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport dask.array as da\n\nfrom dask_image.ndmorph import _utils\n\n\n@pytest.mark.parametrize(\n    \"err_type, input, structure\",\n    [\n        (\n            RuntimeError,\n            da.ones([1, 2], dtype=bool, chunks=(1, 2,)),\n            da.arange(2, dtype=bool, chunks=(2,))\n        ),\n        (\n            TypeError,\n            da.arange(2, dtype=bool, chunks=(2,)),\n            2.0\n        ),\n    ]\n)\ndef test_errs__get_structure(err_type, input, structure):\n    with pytest.raises(err_type):\n        _utils._get_structure(input, structure)\n\n\n@pytest.mark.parametrize(\n    \"err_type, iterations\",\n    [\n        (TypeError, 0.0),\n        (NotImplementedError, 0),\n    ]\n)\ndef test_errs__get_iterations(err_type, iterations):\n    with pytest.raises(err_type):\n        _utils._get_iterations(iterations)\n\n\n@pytest.mark.parametrize(\n    \"err_type, input, mask\",\n    [\n        (\n            RuntimeError,\n            da.arange(2, dtype=bool, chunks=(2,)),\n            da.arange(1, dtype=bool, chunks=(2,))\n        ),\n        (\n            TypeError,\n            da.arange(2, dtype=bool, chunks=(2,)),\n            2.0\n        ),\n    ]\n)\ndef test_errs__get_mask(err_type, input, mask):\n    with pytest.raises(err_type):\n        _utils._get_mask(input, mask)\n\n\n@pytest.mark.parametrize(\n    \"err_type, border_value\",\n    [\n        (TypeError, 0.0),\n        (TypeError, 1.0),\n    ]\n)\ndef test_errs__get_border_value(err_type, border_value):\n    with pytest.raises(err_type):\n        _utils._get_border_value(border_value)\n\n\n@pytest.mark.parametrize(\n    \"err_type, brute_force\",\n    [\n        (NotImplementedError, True),\n        (TypeError, 1),\n    ]\n)\ndef test_errs__get_brute_force(err_type, brute_force):\n    with pytest.raises(err_type):\n        
_utils._get_brute_force(brute_force)\n\n\n@pytest.mark.parametrize(\n    \"expected, input, structure\",\n    [\n        (\n            np.array([1, 1, 1], dtype=bool),\n            (da.arange(10, chunks=(10,)) % 2).astype(bool),\n            None\n        ),\n        (\n            np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool),\n            (da.arange(100, chunks=10).reshape(10, 10) % 2).astype(bool),  # noqa: E501\n            None\n        ),\n        (\n            np.array([1, 1, 1], dtype=bool),\n            (da.arange(10, chunks=(10,)) % 2).astype(bool),\n            np.array([1, 1, 1], dtype=int)\n        ),\n        (\n            np.array([1, 1, 1], dtype=bool),\n            (da.arange(10, chunks=(10,)) % 2).astype(bool),\n            np.array([1, 1, 1], dtype=bool)\n        ),\n    ]\n)\ndef test__get_structure(expected, input, structure):\n    result = _utils._get_structure(input, structure)\n\n    assert expected.dtype.type is result.dtype.type\n    assert np.array((expected == result).all())[()]\n\n\n@pytest.mark.parametrize(\n    \"expected, iterations\",\n    [\n        (1, 1),\n        (4, 4),\n    ]\n)\ndef test__get_iterations(expected, iterations):\n    assert expected == _utils._get_iterations(iterations)\n\n\n@pytest.mark.parametrize(\n    \"expected, a\",\n    [\n        (np.bool_, False),\n        (np.int_, 2),\n        (np.float64, 3.1),\n        (np.complex128, 1 + 2j),\n        (np.int16, np.int16(6)),\n        (np.uint32, np.arange(3, dtype=np.uint32)),\n    ]\n)\ndef test__get_dtype(expected, a):\n    assert np.dtype(expected) is _utils._get_dtype(a)\n\n\n@pytest.mark.parametrize(\n    \"expected, input, mask\",\n    [\n        (True, da.arange(2, dtype=bool, chunks=(2,)), None),\n        (True, da.arange(2, dtype=bool, chunks=(2,)), True),\n        (False, da.arange(2, dtype=bool, chunks=(2,)), False),\n        (\n            True,\n            da.arange(2, dtype=bool, chunks=(2,)),\n            np.bool_(True)\n        ),\n   
     (\n            False,\n            da.arange(2, dtype=bool, chunks=(2,)),\n            np.bool_(False)\n        ),\n        (\n            np.arange(2, dtype=bool),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            np.arange(2, dtype=bool)\n        ),\n        (\n            da.arange(2, dtype=bool, chunks=(2,)),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            da.arange(2, dtype=int, chunks=(2,))\n        ),\n    ]\n)\ndef test__get_mask(expected, input, mask):\n    result = _utils._get_mask(input, mask)\n\n    assert type(expected) is type(result)\n\n    if isinstance(expected, (np.ndarray, da.Array)):\n        assert np.array((expected == result).all())[()]\n    else:\n        assert expected == result\n\n\n@pytest.mark.parametrize(\n    \"expected, border_value\",\n    [\n        (False, False),\n        (True, True),\n        (False, 0),\n        (True, 1),\n        (True, 5),\n        (True, -2),\n    ]\n)\ndef test__get_border_value(expected, border_value):\n    assert expected == _utils._get_border_value(border_value)\n\n\n@pytest.mark.parametrize(\n    \"expected, brute_force\",\n    [\n        (False, False),\n    ]\n)\ndef test__get_brute_force(expected, brute_force):\n    assert expected == _utils._get_brute_force(brute_force)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmorph/test_cupy_ndmorph.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport dask.array as da\nimport numpy as np\nimport pytest\n\nimport dask_image.ndmorph\n\ncupy = pytest.importorskip(\"cupy\", minversion=\"9.0.0\")\n\n\n@pytest.fixture\ndef array():\n    s = (10, 10)\n    a = da.from_array(cupy.arange(int(np.prod(s)),\n                      dtype=cupy.float32).reshape(s), chunks=5)\n    return a\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"func\", [\n    dask_image.ndmorph.binary_closing,\n    dask_image.ndmorph.binary_dilation,\n    dask_image.ndmorph.binary_erosion,\n    dask_image.ndmorph.binary_opening,\n])\ndef test_cupy_ndmorph(array, func):\n    \"\"\"Test convolve & correlate filters with cupy input arrays.\"\"\"\n    result = func(array)\n    assert result.dtype == bool\n    assert result._meta.dtype == bool\n    assert isinstance(result._meta, cupy.ndarray)\n    computed = result.compute()\n    assert computed.dtype == bool\n    assert isinstance(computed, cupy.ndarray)\n"
  },
  {
    "path": "tests/test_dask_image/test_ndmorph/test_ndmorph.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\nimport dask_image.ndmorph\n\n\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"binary_closing\",\n        \"binary_dilation\",\n        \"binary_erosion\",\n        \"binary_opening\",\n    ]\n)\n@pytest.mark.parametrize(\n    \"err_type, input, structure, origin\",\n    [\n        (\n            RuntimeError,\n            da.ones([1, 2], dtype=bool, chunks=(1, 2,)),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            0\n        ),\n        (\n            TypeError,\n            da.arange(2, dtype=bool, chunks=(2,)),\n            2.0,\n            0\n        ),\n        (\n            TypeError,\n            da.ones([2], dtype=bool, chunks=(2,)),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            0.0\n        ),\n    ]\n)\ndef test_errs_binary_ops(funcname,\n                         err_type,\n                         input,\n                         structure,\n                         origin):\n    da_func = getattr(dask_image.ndmorph, funcname)\n\n    with pytest.raises(err_type):\n        da_func(\n            input,\n            structure=structure,\n            origin=origin\n        )\n\n\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"binary_closing\",\n        \"binary_dilation\",\n        \"binary_erosion\",\n        \"binary_opening\",\n    ]\n)\n@pytest.mark.parametrize(\n    \"err_type, input, structure, iterations, origin\",\n    [\n        (\n            TypeError,\n            da.ones([2], dtype=bool, chunks=(2,)),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            1.0,\n            0\n        ),\n        (\n            NotImplementedError,\n            da.ones([2], dtype=bool, chunks=(2,)),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            0,\n            0\n        )\n    ]\n)\ndef 
test_errs_binary_ops_iter(funcname,\n                              err_type,\n                              input,\n                              structure,\n                              iterations,\n                              origin):\n    da_func = getattr(dask_image.ndmorph, funcname)\n\n    with pytest.raises(err_type):\n        da_func(\n            input,\n            structure=structure,\n            iterations=iterations,\n            origin=origin\n        )\n\n\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"binary_closing\",\n        \"binary_dilation\",\n        \"binary_erosion\",\n        \"binary_opening\",\n    ]\n)\n@pytest.mark.parametrize(\n    \"err_type, input, structure, iterations, mask, border_value, origin\"\n    \", brute_force\",\n    [\n        (\n            RuntimeError,\n            da.ones([2], dtype=bool, chunks=(2,)),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            1,\n            da.arange(2, dtype=bool, chunks=(2,))[None],\n            0,\n            0,\n            False\n        ),\n        (\n            TypeError,\n            da.ones([2], dtype=bool, chunks=(2,)),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            1,\n            da.arange(2, dtype=bool, chunks=(2,)),\n            2.0,\n            0,\n            False\n        ),\n        (\n            NotImplementedError,\n            da.ones([2], dtype=bool, chunks=(2,)),\n            da.arange(2, dtype=bool, chunks=(2,)),\n            1,\n            da.arange(2, dtype=bool, chunks=(2,)),\n            0,\n            0,\n            True\n        ),\n    ]\n)\ndef test_errs_binary_ops_expanded(funcname,\n                                  err_type,\n                                  input,\n                                  structure,\n                                  iterations,\n                                  mask,\n                                  border_value,\n                                  origin,\n       
                           brute_force):\n    da_func = getattr(dask_image.ndmorph, funcname)\n\n    with pytest.raises(err_type):\n        da_func(\n            input,\n            structure=structure,\n            iterations=iterations,\n            mask=mask,\n            border_value=border_value,\n            origin=origin,\n            brute_force=brute_force\n        )\n\n\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"binary_closing\",\n        \"binary_dilation\",\n        \"binary_erosion\",\n        \"binary_opening\",\n    ]\n)\n@pytest.mark.parametrize(\n    \"input, structure, origin\",\n    [\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n                     [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                     [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n                     [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            None,\n            0\n        ),\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n                     [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                     [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n         
            [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            np.ones([3, 3], dtype=bool),\n            0\n        ),\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n                     [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                     [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n                     [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            np.ones([3, 3], dtype=bool),\n            1\n        ),\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n                     [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                     [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n                     [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            np.ones([3, 3], dtype=bool),\n            -1\n        ),\n    ]\n)\ndef test_binary_ops(funcname,\n        
            input,\n                    structure,\n                    origin):\n    da_func = getattr(dask_image.ndmorph, funcname)\n    sp_func = getattr(scipy.ndimage, funcname)\n\n    da_result = da_func(\n        input,\n        structure=structure,\n        origin=origin\n    )\n\n    sp_result = sp_func(\n        input,\n        structure=structure,\n        origin=origin\n    )\n\n    da.utils.assert_eq(sp_result, da_result)\n\n\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"binary_closing\",\n        \"binary_dilation\",\n        \"binary_erosion\",\n        \"binary_opening\",\n    ]\n)\n@pytest.mark.parametrize(\n    \"input, structure, iterations, origin\",\n    [\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n                     [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                     [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n                     [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            np.ones([3, 3], dtype=bool),\n            3,\n            0\n        ),\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n                     [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                   
  [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n                     [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            np.ones([3, 3], dtype=bool),\n            3,\n            1\n        ),\n    ]\n)\ndef test_binary_ops_iter(funcname,\n                         input,\n                         structure,\n                         iterations,\n                         origin):\n    da_func = getattr(dask_image.ndmorph, funcname)\n    sp_func = getattr(scipy.ndimage, funcname)\n\n    da_result = da_func(\n        input,\n        structure=structure,\n        iterations=iterations,\n        origin=origin\n    )\n\n    sp_result = sp_func(\n        input,\n        structure=structure,\n        iterations=iterations,\n        origin=origin\n    )\n\n    da.utils.assert_eq(sp_result, da_result)\n\n\n@pytest.mark.parametrize(\n    \"funcname\",\n    [\n        \"binary_closing\",\n        \"binary_dilation\",\n        \"binary_erosion\",\n        \"binary_opening\",\n    ]\n)\n@pytest.mark.parametrize(\n    \"input, structure, iterations, mask, border_value, origin, brute_force\",\n    [\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n                     [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                     [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n                     [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                   
 dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            np.ones([3, 3], dtype=bool),\n            1,\n            None,\n            1,\n            0,\n            False\n        ),\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n                     [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                     [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n                     [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            np.ones([3, 3], dtype=bool),\n            1,\n            da.from_array(\n                np.array(\n                    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            0,\n            0,\n            False\n        ),\n        (\n            da.from_array(\n                np.array(\n                    [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n            
         [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n                     [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1],\n                     [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n                     [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n                     [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\n                     [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],\n                     [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            np.ones([3, 3], dtype=bool),\n            3,\n            da.from_array(\n                np.array(\n                    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n                     [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n                    dtype=bool\n                ),\n                chunks=(5, 6)\n            ),\n            0,\n            0,\n            False\n        ),\n    ]\n)\ndef test_binary_ops_expanded(funcname,\n                             input,\n                             structure,\n                             iterations,\n                             mask,\n                             border_value,\n                             origin,\n                             brute_force):\n    da_func = getattr(dask_image.ndmorph, funcname)\n    sp_func = getattr(scipy.ndimage, funcname)\n\n    da_result = da_func(\n        input,\n        
structure=structure,\n        iterations=iterations,\n        mask=mask,\n        border_value=border_value,\n        origin=origin,\n        brute_force=brute_force\n    )\n\n    sp_result = sp_func(\n        input,\n        structure=structure,\n        iterations=iterations,\n        mask=mask,\n        border_value=border_value,\n        origin=origin,\n        brute_force=brute_force\n    )\n\n    da.utils.assert_eq(sp_result, da_result)\n"
  }
]