Repository: dask/dask-image Branch: main Commit: f0ee7aee4c48 Files: 99 Total size: 349.8 KB Directory structure: gitextract_iofc2rra/ ├── .coveragerc ├── .coveralls.yml ├── .editorconfig ├── .gitattributes ├── .github/ │ ├── ISSUE_TEMPLATE.md │ ├── PULL_REQUEST_TEMPLATE.md │ ├── dependabot.yml │ └── workflows/ │ └── test_and_deploy.yml ├── .gitignore ├── .readthedocs.yml ├── AUTHORS.rst ├── CONTRIBUTING.rst ├── HISTORY.rst ├── LICENSE.txt ├── MANIFEST.in ├── Makefile ├── README.rst ├── continuous_integration/ │ ├── environment-3.10.yml │ ├── environment-3.11.yml │ ├── environment-3.12.yml │ ├── environment-3.9.yml │ └── environment-doc.yml ├── dask_image/ │ ├── __init__.py │ ├── dispatch/ │ │ ├── __init__.py │ │ ├── _dispatch_ndfilters.py │ │ ├── _dispatch_ndinterp.py │ │ ├── _dispatch_ndmorph.py │ │ ├── _dispatcher.py │ │ └── _utils.py │ ├── imread/ │ │ └── __init__.py │ ├── ndfilters/ │ │ ├── __init__.py │ │ ├── _conv.py │ │ ├── _diff.py │ │ ├── _edge.py │ │ ├── _gaussian.py │ │ ├── _generic.py │ │ ├── _order.py │ │ ├── _smooth.py │ │ ├── _threshold.py │ │ └── _utils.py │ ├── ndfourier/ │ │ ├── __init__.py │ │ └── _utils.py │ ├── ndinterp/ │ │ ├── __init__.py │ │ ├── _affine_transform.py │ │ ├── _map_coordinates.py │ │ ├── _rotate.py │ │ └── _spline_filters.py │ ├── ndmeasure/ │ │ ├── __init__.py │ │ └── _utils/ │ │ ├── __init__.py │ │ ├── _find_objects.py │ │ └── _label.py │ └── ndmorph/ │ ├── __init__.py │ ├── _ops.py │ └── _utils.py ├── docs/ │ ├── Makefile │ ├── api.rst │ ├── authors.rst │ ├── conf.py │ ├── contributing.rst │ ├── coverage.rst │ ├── history.rst │ ├── index.rst │ ├── installation.rst │ ├── make.bat │ ├── quickstart.rst │ └── release/ │ ├── generate_release_notes.py │ └── release_guide.rst ├── pyproject.toml └── tests/ ├── __init__.py └── test_dask_image/ ├── test_imread/ │ ├── __init__.py │ ├── test_core.py │ └── test_cupy_imread.py ├── test_ndfilters/ │ ├── __init__.py │ ├── test__conv.py │ ├── test__diff.py │ ├── test__edge.py │ ├── test__gaussian.py │ ├── test__generic.py │ ├── test__order.py │ ├── test__smooth.py │ ├── test__threshold.py │ ├── test__utils.py │ ├── test_cupy_ndfilters.py │ └── test_cupy_threshold.py ├── test_ndfourier/ │ ├── test__utils.py │ └── test_core.py ├── test_ndinterp/ │ ├── test_affine_transformation.py │ ├── test_map_coordinates.py │ ├── test_rotate.py │ └── test_spline_filter.py ├── test_ndmeasure/ │ ├── __init__.py │ ├── test__utils.py │ ├── test_core.py │ ├── test_find_objects.py │ └── test_find_objects_no_dataframe.py └── test_ndmorph/ ├── __init__.py ├── test__utils.py ├── test_cupy_ndmorph.py └── test_ndmorph.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .coveragerc ================================================ [run] branch = True source = dask_image [report] exclude_lines = # Include the no cover pragma as it needs to be listed explicitly when # using exclude_lines. # ( http://coverage.readthedocs.io/en/coverage-4.1/excluding.html#advanced-exclusion ) pragma: no cover # Ignore coverage of code that requires the module to be executed. if __name__ == .__main__.: # Ignore continue statement in code as it can't be detected as covered # due to an optimization by the Python interpreter. See coverage issue # ( https://bitbucket.org/ned/coveragepy/issue/198/continue-marked-as-not-covered ) # and Python issue ( http://bugs.python.org/issue2506 ). continue omit = */python?.?/* */site-packages/* */eggs/* */.eggs/* *tests/* */_version.py */_vendor/* */dispatch/* ================================================ FILE: .coveralls.yml ================================================ repo_token: mu5JxVQy1FJSQvhczAzyHvaXx4qfHhF1R ================================================ FILE: .editorconfig ================================================ # http://editorconfig.org root = true [*] indent_style = space indent_size = 4 trim_trailing_whitespace = true insert_final_newline = true charset = utf-8 end_of_line = lf [*.bat] indent_style = tab end_of_line = crlf [LICENSE] insert_final_newline = false [Makefile] indent_style = tab ================================================ FILE: .gitattributes ================================================ dask_image/_version.py export-subst *.bat text eol=crlf *.sh text eol=lf *.yaml text eol=lf ================================================ FILE: .github/ISSUE_TEMPLATE.md ================================================ * dask-image version: * Python version: * Operating System: ### Description Describe what you were trying to get done. Tell us what happened, what went wrong, and what you expected to happen. ### What I Did ``` Paste the command(s) you ran and the output. If there was a crash, please include the traceback here. ``` ================================================ FILE: .github/PULL_REQUEST_TEMPLATE.md ================================================ Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. 3. The pull request should pass all our continuous integration checks before it is merged. ================================================ FILE: .github/dependabot.yml ================================================ # Set update schedule for GitHub Actions version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: # Check for updates to GitHub Actions every weekday interval: "weekly" ================================================ FILE: .github/workflows/test_and_deploy.yml ================================================ name: test_and_deploy on: push: branches: - main tags: - "v*" # Push events to matching v*, i.e. v1.0, v20.15.10 pull_request: null workflow_dispatch: null jobs: test: runs-on: ${{ matrix.os }} strategy: fail-fast: true matrix: os: ["windows-latest", "ubuntu-latest", "macos-latest"] python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - name: Checkout source uses: actions/checkout@v6 - name: Setup Conda Environment uses: conda-incubator/setup-miniconda@v4 with: python-version: ${{ matrix.python-version }} environment-file: continuous_integration/environment-${{ matrix.python-version }}.yml activate-environment: dask-image-testenv auto-activate-base: false - name: Install dask-image shell: bash -l {0} run: | conda activate dask-image-testenv python -m pip install -e .[dataframe] conda list - name: Run tests shell: bash -l {0} run: pytest -v --cov=dask_image --cov-report lcov - name: Coveralls Parallel uses: coverallsapp/github-action@v2.3.7 with: github-token: ${{ secrets.github_token }} flag-name: run-${{ matrix.test_number }} parallel: true path-to-lcov: coverage.lcov test-minimal: # Verify dask-image works without the optional `dataframe` extras # (i.e. without pandas and dask[dataframe]). runs-on: ubuntu-latest steps: - name: Checkout source uses: actions/checkout@v5 - name: Set up Python uses: actions/setup-python@v6 with: python-version: "3.12" - name: Install dask-image without dataframe extras run: | python -m pip install --upgrade pip python -m pip install -e .[test] - name: Run tests (find_objects tests are skipped automatically) run: pytest -v coveralls: needs: test runs-on: ubuntu-latest steps: - name: Coveralls Finished uses: coverallsapp/github-action@v2.3.7 with: github-token: ${{ secrets.github_token }} parallel-finished: true deploy: # This will upload a Python Package using Twine when a release is created # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries # # This job will run when you have tagged a commit, starting with "v*" # or created a release in GitHub which includes a tag starting with "v*" # and requires that you have put your twine API key in your # github secrets (see readme for details) needs: [test] runs-on: ubuntu-latest if: contains(github.ref, 'tags') steps: - uses: actions/checkout@v6 - name: Set up Python uses: actions/setup-python@v6 with: python-version: '3.x' - name: Install dependencies run: | python -m pip install --upgrade pip pip install build twine - name: Build and publish env: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | python -m build twine upload dist/* ================================================ FILE: .gitignore ================================================ # setuptools-scm dynamically generated version dask_image/_version.py # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ docs/dask_image*.rst # PyBuilder target/ # pyenv python configuration file .python-version ================================================ FILE: .readthedocs.yml ================================================ version: 2 build: os: "ubuntu-22.04" tools: python: "mambaforge-4.10" jobs: pre_install: # Avoid `git` treating the directory is dirty due to RTD changes. # ref: https://docs.readthedocs.io/en/stable/build-customization.html#avoid-having-a-dirty-git-index - >- git update-index --assume-unchanged continuous_integration/environment-doc.yml docs/conf.py # If we missed any, error and list the changed files. - git diff --stat --exit-code sphinx: configuration: docs/conf.py conda: environment: continuous_integration/environment-doc.yml python: install: - method: pip path: . ================================================ FILE: AUTHORS.rst ================================================ ======= Credits ======= Development Lead ---------------- * John Kirkham `@jakirkham `_ Contributors ------------ See the full list of contributors `here `_ ================================================ FILE: CONTRIBUTING.rst ================================================ .. highlight:: shell ============ Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: Types of Contributions ---------------------- Report Bugs ~~~~~~~~~~~ Report bugs at https://github.com/dask/dask-image/issues. If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. Fix Bugs ~~~~~~~~ Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. Implement Features ~~~~~~~~~~~~~~~~~~ Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. Write Documentation ~~~~~~~~~~~~~~~~~~~ dask-image could always use more documentation, whether as part of the official dask-image docs, in docstrings, or even on the web in blog posts, articles, and such. To build the documentation locally and preview your changes, first set up the conda environment for building the dask-image documentation: .. code-block:: console $ conda env create -f continuous_integration/environment-doc.yml $ conda activate dask_image_doc_env This conda environment contains dask-image and its dependencies, sphinx, and the dask-sphinx-theme. Next, build the documentation with sphinx: .. code-block:: console $ cd dask-image/docs $ make html Now you can preview the html documentation in your browser by opening the file: dask-image/docs/_build/html/index.html Submit Feedback ~~~~~~~~~~~~~~~ The best way to send feedback is to file an issue at https://github.com/dask/dask-image/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up `dask-image` for local development. 1. Fork the `dask-image` repo on GitHub. 2. Clone your fork locally:: $ git clone git@github.com:your_name_here/dask-image.git $ cd dask-image 3. Install your local copy into an environment. Assuming you have conda installed, this is how you set up your fork for local development (on Windows drop `source`). Replace `""` with the Python version used for testing.:: $ conda create -n dask-image-env python="" $ source activate dask-image-env $ python -m pip install -e . 4. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions:: $ flake8 dask_image tests $ pytest To get flake8, just conda install it into your environment. 6. Commit your changes and push your branch to GitHub:: $ git add . $ git commit -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature 7. Submit a pull request through the GitHub website. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. 3. The pull request should work for all supported Python versions. Check CIs and make sure that the tests pass for all supported Python versions and platforms. Testing ------- Running tests locally ~~~~~~~~~~~~~~~~~~~~~ To setup a local testing environment that matches the test environments we use for our continuous integration services, you can use the ``.yml`` conda environment files included in the ``continuous_integration`` folder in the dask-image repository. There is a separate environment file for each supported Python version. We will use conda to `create an environment from a file`_ (``conda env create -f name-of-environment-file.yml``). .. note:: If you don't have `conda`_ installed, we recommend downloading and installing it with the conda-forge distribution `Miniforge`_. .. code-block:: console $ conda env create -f continuous_integration/environment-latest.yml This command will create a new conda test environment called ``dask-image-testenv`` with all required dependencies. Now you can activate your new testing environment with:: .. code-block:: console $ conda activate dask-image-testenv Finally, install the development version of dask-image:: .. code-block:: console $ pip install -e ".[test]"" For local testing, please run ``pytest`` in the test environment:: .. code-block:: console $ pytest To run a subset of tests, for example all the tests for ndfourier:: $ pytest tests/test_dask_image/test_ndfourier .. _create an environment from a file: https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-from-an-environment-yml-file .. _conda: https://conda.io/en/latest/ .. _Miniforge: https://conda-forge.org/download/ Continuous integration tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Creating a pull request will automatically run the continuous integration tests with Github Actions. Results from the continuous integration (CI) checks are shown linked at the bottom of your pull request, and also in the dask-image GitHub Actions tab: https://github.com/dask/dask-image/actions To edit the CI checks, see the workflow scripts in the repository located in ``dask-image/.github/workflows`` GPU continuous integration ^^^^^^^^^^^^^^^^^^^^^^^^^^^ GPU nightly testing is run in the `rapidsai/dask-upstream-testing repo `_ ================================================ FILE: HISTORY.rst ================================================ ======= History ======= v2025.11.0 (2025-11-12) ----------------------- We're pleased to announce the release of dask-image v2025.11.0! Highlights The key highlight of this release is that Marvin Albert added a dask-image implementation of the scipy.ndimage.map_coordinates function (#237). There have also been improvements to the documentation. New Features * Implement support for ndimage.map_coordinates (#237) Improvements * Use `tifffile.TiffWriter`'s `write` method in `test_cupy_imread` (#398) * Recommend dask.array.image.imread over dask-image imread (#410) * Expand dask_image.imread.imread docstring (#411) * Add spline filter docstrings (#412) * Fix typo (#402) Maintenance * ReadTheDocs: fix displayed version number in top left corner (#379) * Display dev version numbers on ReadTheDocs latest (#380) * Update conf.py, sphinx context injection deprecated in ReadTheDocs (#383) * fix KeyError: "None of [Index(['0_x', '1_x', '0_y', '1_y'], dtype='object')] are in the [columns]" in find_objects (#384) * Bump coverallsapp/github-action from 2.3.0 to 2.3.4 (#390) * Fix CI test failures (#393) * Recommend miniforge conda installer in docs (#395) * Update pytest config key (#396) * Drop gpuCI & ref dask-upstream-testing (#401) * Bump actions/checkout from 4 to 5 (#406) * Bump actions/setup-python from 5 to 6 (#408) * Maintenance: split ndinterp long __init__.py file functions into separate files (#416) 9 authors added to this release (alphabetical) * `David Haberthür `_ - @habi * `David Stansby `_ - @dstansby * `dependabot[bot] `_ - @dependabot[bot] * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Joshua Gould `_ - @joshua-gould * `Kimberly Meechan `_ - @K-Meech * `Marvin Albert `_ - @m-albert * `Tom Augspurger `_ - @TomAugspurger 4 reviewers added to this release (alphabetical) * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Marvin Albert `_ - @m-albert * `Thomas Robitaille `_ - @astrofrog 2024.5.0 (2024-05-17) ---------------------- We're pleased to announce the release of dask-image 2024.5.0! Highlights Highlights of this release include: * Martin Schorb adding 'rotate', 'spline_filter' and 'spline_filter1d' functions (#213) * Erik Holmgren adding functionality to allow wrapping labels over array boundaries (#344), and * Christoph Sommer's work allowing aicsimageio and other da.core.Array sub-classes as input arrays (#361) New Features * Add the rotate, spline_filter, and spline_filter1d functions to ndimage (#213) * Wrapping labels over array boundaries (#344) * Add python 3.12 support (#370) Improvements * Relaxed type check of input array, to allow da.core.Array sub-classes… (#361) * Update slice index comment to reflect code change (#353) Maintenance * Switch to pyproject.toml package setup, replace versioneer with setuptools-scm (#306) * Fix cupy pytest errors (#368) * Switch to newer GPU CI images (#345) * Bump GPU CI to CUDA 11.8 (#348) * Maintenance: fix CI test errors (#366) * Update CI test environments (#367) * Additions to release guide and change to release note generation script (#339) * Fix typo in pull request template (#347) * Workaround for the sphinx version problem in the readthedocs build environment (#354) * Pin dask to 2024.4.1 to avoid error during dask.dataframe import with python 3.11.9 (#363) * Get rid of distutils dependency -- Depend on newer scipy (#346) * Bump actions/checkout from 3 to 4 (#342) * Bump actions/setup-python from 4 to 5 (#350) * Bump coverallsapp/github-action from 2.2.1 to 2.2.3 (#343) * Bump conda-incubator/setup-miniconda from 2 to 3 (#349) * Bump coverallsapp/github-action from 2.2.3 to 2.3.0 (#365) * Update versioneer to version 0.29 for compatibility with python 3.12 (#357) 9 authors added to this release (alphabetical) * `Charles Blackmon-Luca `_ - @charlesbluca * `Christoph Sommer `_ - @sommerc * `dependabot[bot] `_ - @dependabot[bot] * `Erik Holmgren `_ - @Holmgren825 * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Mark Harfouche `_ - @hmaarrfk * `Martin Schorb `_ - @martinschorb * `Marvin Albert `_ - @m-albert 5 reviewers added to this release (alphabetical) * `Erik Holmgren `_ - @Holmgren825 * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Juan Nunez-Iglesias `_ - @jni * `Marvin Albert `_ - @m-albert 2023.08.1 (2023-08-04) ---------------------- We're pleased to announce the release of dask-image 2023.08.1! This is a patch release to complete the dropping of python 3.8 in the previous release. * Use `>=3.9` in `python_requires` in `setup.py` (#336) 2 authors added to this release (alphabetical) * `jakirkham `_ - @jakirkham * `Marvin Albert `_ - @m-albert 0 reviewers added to this release (alphabetical) 2023.08.0 (2023-08-03) ---------------------- We're pleased to announce the release of dask-image 2023.08.0! Highlights This version fixes bugs related to processing CuPy backed dask arrays and improves testing on GPU CI. It drops support for python 3.8 and adds pandas as a dependency. As a feature improvement, the dask-image equivalent of ``scipy.ndimage.label`` now supports arbitrary structuring elements. For full support of all GPU functionality in dask-image we recommend using CuPy version 9.0.0 or higher. Improvements * Generalised ndmeasure.label to arbitrary structuring elements (#321) Bug Fixes * Added missing cupy test mark and fixed cupy threshold (#329) * Moved functions from ndimage submodules to ndimage namespace (#325) Updated requirements * Drop Python 3.8, in accordance with NEP29 recommendation (#315) * Require NumPy 1.18+ (#304) * Add pandas requirement for find_objs function (#309) Build Tools * Continuous integration * Update GPU conda environment before running tests (#318) * Fix GitHub actions README badge (#323) * Dependabot updates * Bump coverallsapp/github-action from 2.0.0 to 2.1.2 (#313) * Bump coverallsapp/github-action from 2.1.2 to 2.2.0 (#322) * Bump coverallsapp/github-action from 2.2.0 to 2.2.1 (#326) 6 authors added to this release (alphabetical) * `Charles Blackmon-Luca `_ - @charlesbluca * `David Stansby `_ - @dstansby * `dependabot[bot] `_ - @dependabot[bot] * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Marvin Albert `_ - @m-albert 4 reviewers added to this release (alphabetical) * `Charles Blackmon-Luca `_ - @charlesbluca * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Juan Nunez-Iglesias `_ - @jni v2023.03.0 (2023-03-27) ----------------------- We're pleased to announce the release of dask-image v2023.03.0! Highlights This version of dask-image drops support for python 3.7, now requires a minimum Dask version of 2021.10.0 or higher (due to a security patch), and makes tifffile a regular requirement. We also now build and publish wheel files to PyPI. Improvements * Documentation * Add GPU CI info to contributing docs (#300) * Docs: add GPU support info to coverage table (#301) * Testing * Test `gaussian` alias (#287) * Update NaN block size tests for threshold_local function (#289) * Test `find_objects` w/incorrect array type (#292) Deprecations and updated requirements * Update supported python versions to 3.8, 3.9, 3.10, & 3.11 (drop python 3.7) (#284) * Security update: Dask v2021.10.0 as minimum allowable version (#288) * Make tifffile regular requirement (#295) Build Tools * Continuous integration * Refresh doc environment (#273) * Setup Coveralls with GitHub Actions (#274) * Pin to jinja2<3.1 to avoid Readthedocs build error (#278) * Updates `setup.py`'s Python versions (#285) * Combine CI workflows for testing and release upload to PyPI (#291) * Enable option to restart GHA (#293) * Readd `environment-latest.yml` symlink (#294) * Add python 3.10 to gpuCI matrix (#298) * Releases * ENH: Build and publish wheels in GitHub CI (#272) * Update release notes script (#299) * Release notes for v2022.09.0 (#270) * Dependabot updates * Create dependabot.yml (#279) * Bump actions/setup-python from 2 to 4 (#280) * Bump actions/checkout from 2 to 3 (#281) * Bump coverallsapp/github-action from 1.1.3 to 1.2.2 (#282) * Bump coverallsapp/github-action from 1.2.2 to 1.2.4 (#283) * Bump coverallsapp/github-action from 1.2.4 to 2.0.0 (#296) Other Pull Requests * Group all imread functions together in the same file (#290) 7 authors added to this release (alphabetical) * `Charles Blackmon-Luca `_ - @charlesbluca * `dependabot[bot] `_ - @dependabot[bot] * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Marvin Albert `_ - @m-albert * `Matt McCormick `_ - @thewtex * `Volker Hilsenstein `_ - @VolkerH 3 reviewers added to this release (alphabetical) * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Matt McCormick `_ - @thewtex v2022.09.0 (2022-09-19) ----------------------- We're pleased to announce the release of dask-image v2022.09.0! Not much has changed since the last release. Volker Hilsenstein has improved imread, which now uses natural sorting for strings. Fred Blunt has fixed deprecation warnings from scipy.ndimage, and we've also done some miscellaneous maintenance work. Improvements * Use natural sorting in `imread(...)` when globbing multiple files (#265) * Avoid DeprecationWarnings when importing scipy.ndimage filter functions (#261) Maintenance * Remove/add testing for python 3.6/3.9, update CI pinnings (#257) * Update docs theme for rebranding (#263) * Run CI on `main` (#264) 6 authors added to this release (alphabetical) * `Charles Blackmon-Luca `_ - @charlesbluca * `Fred Bunt `_ - @fbunt * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Sarah Charlotte Johnson `_ - @scharlottej13 * `Volker Hilsenstein `_ - @VolkerH 3 reviewers added to this release (alphabetical) * `Charles Blackmon-Luca `_ - @charlesbluca * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham 2021.12.0 ---------- We're pleased to announce the release of dask-image 2021.12.0! Highlights The major highlights of this release include the introduction of new featurees for ``find_objects`` and spline filters. We have also moved to using CalVer (calendar version numbers) to match the main Dask project. New Features * Find objects bounding boxes (#240) * Add spline_filter and spline_filter1d (#215) Improvements * ENH: add remaining kwargs to binary_closing and binary_opening (#221) * ndfourier: support n > 0 (for rfft) and improve performance (#222) * affine_transform: increased shape of required input array slices (#216) Bug Fixes * BUG: add missing import of warnings in dask_image.ndmeasure (#224) * Fix wrap bug in ndfilters convolve and correlate (#243) * Upgrade for compatibility with latest dask release (#241) Test infrastructure * GitHub actions testing (#188) * Set up gpuCI testing on PRs (#248) * Remove `RAPIDS_VER` axis, bump `CUDA_VER` in gpuCI matrix (#249) Documentation updates * Code style cleanup (#227) * Remove out of date email address, strip __author__ & __email__ (#225) * Update release guide, Dask CalVer uses YYYY.MM.DD (#236) * Update min python version in setup.py (#250) * Use new Dask docs theme (#245) * Docs: Add `find_objects` to the coverage table (#254) Other Pull Requests * Switch to CalVer (calendar versioning) (#233) 6 authors added to this release (alphabetical) * `anlavandier `_ - @anlavandier * `Charles Blackmon-Luca `_ - @charlesbluca * `Genevieve Buckley `_ - @GenevieveBuckley * `Gregory R. Lee `_ - @grlee77 * `Jacob Tomlinson `_ - @jacobtomlinson * `Marvin Albert `_ - @m-albert 6 reviewers added to this release (alphabetical) * `anlavandier `_ - @anlavandier * `Genevieve Buckley `_ - @GenevieveBuckley * `Gregory R. Lee `_ - @grlee77 * `Jacob Tomlinson `_ - @jacobtomlinson * `jakirkham `_ - @jakirkham * `Marvin Albert `_ - @m-albert 0.6.0 (2021-05-06) ------------------ We're pleased to announce the release of dask-image 0.6.0! Highlights The highlights of this release include GPU support for binary morphological functions, and improvements to the performance of ``imread``. Cupy version 9.0.0 or higher is required for GPU support of the ``ndmorph`` subpackage. Cupy version 7.7.0 or higher is required for GPU support of the ``ndfilters`` and ``imread`` subpackages. New Features * GPU support for ndmorph subpackage: binary morphological functions (#157) Improvements * Improve imread performance: reduced overhead of pim.open calls when reading from image sequence (#182) Bug Fixes * dask-image imread v0.5.0 not working with dask distributed Client & napari (#194) * Not able to map actual image name with dask_image.imread (#200, fixed by #182) * affine_transform: Remove inconsistencies with ndimage implementation #205 API Changes * Add alias ``gaussian`` pointing to ``gaussian_filter`` (#193) Other Pull Requests * Change default branch from master to main (#185) * Fix rst formatting in release_guide.rst (#186) 4 authors added to this release (alphabetical) * `Genevieve Buckley `_ - @GenevieveBuckley * `Julia Signell `_ - @jsignell * `KM Goh `_ - @K-Monty * `Marvin Albert `_ - @m-albert 2 reviewers added to this release (alphabetical) * `Genevieve Buckley `_ - @GenevieveBuckley * `KM Goh `_ - @K-Monty 0.5.0 (2021-02-01) ------------------ We're pleased to announce the release of dask-image 0.5.0! Highlights The biggest highlight of this release is our new affine transformation feature, contributed by Marvin Albert. The SciPy Japan sprint in November 2020 led to many improvements, and I'd like to recognise the hard work by Tetsuo Koyama and Kuya Takami. Special thanks go to everyone who joined us at the conference! New Features * Affine transformation feature added: from dask_image.ndinterp import affine_transform (#159) * GPU support added for local_threshold with method='mean' (#158) * Pathlib input now accepted for imread functions (#174) Improvements * Performance improvement for 'imread', we now use `da.map_blocks` instead of `da.concatenate` (#165) Bug Fixes * Fixed imread tests (add `contiguous=True` when saving test data with tifffile) (#164) * FIXed scipy LooseVersion for sum_labels check (#176) API Changes * 'sum' is renamed to 'sum_labels' and a add deprecation warning added (#172) Documentation improvements * Add section Talks and Slides #163 (#169) * Add link to SciPy Japan 2020 talk (#171) * Add development guide to setup environment and run tests (#170) * Update information in AUTHORS.rst (#167) Maintenance * Update dependencies in Read The Docs environment (#168) 6 authors added to this release (alphabetical) * `Fabian Chong `_ - @feiming * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Kuya Takami `_ - @ku-ya * `Marvin Albert `_ - @m-albert * `Tetsuo Koyama `_ - @tkoyama010 7 reviewers added to this release (alphabetical) * `Fabian Chong `_ - @feiming * `Genevieve Buckley `_ - @GenevieveBuckley * `Gregory R. Lee `_ - @grlee77 * `jakirkham `_ - @jakirkham * `Juan Nunez-Iglesias `_ - @jni * `Marvin Albert `_ - @m-albert * `Tetsuo Koyama `_ - @tkoyama010 0.4.0 (2020-09-02) ------------------ We're pleased to announce the release of dask-image 0.4.0! Highlights The major highlight of this release is support for cupy GPU arrays for dask-image subpackages imread and ndfilters. Cupy version 7.7.0 or higher is required to use this functionality. GPU support for the remaining dask-image subpackages (ndmorph, ndfourier, and ndmeasure) will be rolled out at a later date, beginning with ndmorph. We also have a new function, threshold_local, similar to the scikit-image local threshold function. Lastly, we've made more improvements to the user documentation, which includes work by new contributor @abhisht51. New Features * GPU support for ndfilters & imread modules (#151) * threshold_local function for dask-image ndfilters (#112) Improvements * Add function coverage table to the dask-image docs (#155) * Developer documentation: release guide (#142) * Use tifffile for testing instead of scikit-image (#145) 3 authors added to this release (alphabetical) * `Abhisht Singh `_ - @abhisht51 * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham 2 reviewers added to this release (alphabetical) * `Genevieve Buckley `_ - @GenevieveBuckley * `Juan Nunez-Iglesias `_ - @jni 0.3.0 (2020-06-06) ------------------ We're pleased to announce the release of dask-image 0.3.0! Highlights * Python 3.8 is now supported (#131) * Support for Python 2.7 and 3.5 has been dropped (#119) (#131) * We have a dask-image quickstart guide (#108), available from the dask examples page: https://examples.dask.org/applications/image-processing.html New Features * Distributed labeling has been implemented (#94) * Area measurement function added to dask_image.ndmeasure (#115) Improvements * Optimize out first `where` in `label` (#102) Bug Fixes * Bugfix in `center_of_mass` to correctly handle integer input arrays (#122) * Test float cast in `_norm_args` (#105) * Handle Dask's renaming of `atop` to `blockwise` (#98) API Changes * Rename the input argument to image in the ndimage functions (#117) * Rename labels in ndmeasure function arguments (#126) Support * Update installation instructions so conda is the preferred method (#88) * Add Python 3.7 to Travis CI (#89) * Add instructions for building docs with sphinx to CONTRIBUTING.rst (#90) * Sort Python 3.7 requirements (#91) * Use double equals for exact package versions (#92) * Use flake8 (#93) * Note Python 3.7 support (#95) * Fix the Travis MacOS builds (update XCode to version 9.4 and use matplotlib 'Agg' backend) (#113) 7 authors added to this release (alphabetical) * `Amir Khalighi `_ - @akhalighi * `Elliana May `_ - @Mause * `Genevieve Buckley `_ - @GenevieveBuckley * `jakirkham `_ - @jakirkham * `Jaromir Latal `_ - @jermenkoo * `Juan Nunez-Iglesias `_ - @jni * `timbo8 `_ - @timbo8 2 reviewers added to this release (alphabetical) - `Genevieve Buckley `_ - @GenevieveBuckley - `jakirkham `_ - @jakirkham 0.2.0 (2018-10-10) ------------------ * Construct separate label masks in `labeled_comprehension` (#82) * Use `full` to construct 1-D NumPy array (#83) * Use NumPy's `ndindex` in `labeled_comprehension` (#81) * Cleanup `test_labeled_comprehension_struct` (#80) * Use 1-D structured array fields for position-based kernels in `ndmeasure` (#79) * Rewrite `center_of_mass` using `labeled_comprehension` (#78) * Adjust `extrema`'s internal structured type handling (#77) * Test labeled_comprehension with object type (#76) * Rewrite `histogram` to use `labeled_comprehension` (#75) * Use labeled_comprehension directly in more function in ndmeasure (#74) * Update mean's variables to match other functions (#73) * Consolidate summation in `_ravel_shape_indices` (#72) * Update HISTORY for 0.1.2 release (#71) * Bump dask-sphinx-theme to 1.1.0 (#70) 0.1.2 (2018-09-17) ------------------ * Ensure `labeled_comprehension`'s `default` is 1D. (#69) * Bump dask-sphinx-theme to 1.0.5. (#68) * Use nout=2 in ndmeasure's label. (#67) * Use custom kernel for extrema. (#61) * Handle structured dtype in labeled_comprehension. (#66) * Fixes for `_unravel_index`. (#65) * Bump dask-sphinx-theme to 1.0.4. (#64) * Unwrap some lines. (#63) * Use dask-sphinx-theme. (#62) * Refactor out `_unravel_index` function. (#60) * Divide `sigma` by `-2`. (#59) * Use Python 3's definition of division in Python 2. (#58) * Force dtype of `prod` in `_ravel_shape_indices`. (#57) * Drop vendored compatibility code. (#54) * Drop vendored copy of indices and uses thereof. (#56) * Drop duplicate utility tests from `ndmorph`. (#55) * Refactor utility module for imread. (#53) * Reuse `ndfilter` utility function in `ndmorph`. (#52) * Cleanup freq_grid_i construction in _get_freq_grid. (#51) * Use shared Python 2/3 compatibility module. (#50) * Consolidate Python 2/3 compatibility code. (#49) * Refactor Python 2/3 compatibility from imread. (#48) * Perform `2 * pi` first in `_get_ang_freq_grid`. (#47) * Ensure `J` is negated first in `fourier_shift`. (#46) * Breakout common changes in fourier_gaussian. (#45) * Use conda-forge badge. (#44) 0.1.1 (2018-08-31) ------------------ * Fix a bug in an ndmeasure test of an internal function. 0.1.0 (2018-08-31) ------------------ * First release on PyPI. * Pulls in content from dask-image org. * Supports reading of image files into Dask. * Provides basic N-D filters with options to extend. * Provides a few N-D Fourier filters. * Provides a few N-D morphological filters. * Provides a few N-D measurement functions for label images. * Has 100% line coverage in test suite. ================================================ FILE: LICENSE.txt ================================================ Copyright (c) 2017-2018, dask-image Developers (see AUTHORS.rst for details) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: MANIFEST.in ================================================ include AUTHORS.rst include CONTRIBUTING.rst include HISTORY.rst include LICENSE.txt include README.rst recursive-include tests * recursive-exclude * __pycache__ recursive-exclude * *.py[co] recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif include dask_image/_version.py ================================================ FILE: Makefile ================================================ .PHONY: clean clean-test clean-pyc clean-build docs help .DEFAULT_GOAL := help define BROWSER_PYSCRIPT import os, webbrowser, sys try: from urllib import pathname2url except: from urllib.request import pathname2url webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1]))) endef export BROWSER_PYSCRIPT define PRINT_HELP_PYSCRIPT import re, sys for line in sys.stdin: match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) if match: target, help = match.groups() print("%-20s %s" % (target, help)) endef export PRINT_HELP_PYSCRIPT BROWSER := python -c "$$BROWSER_PYSCRIPT" help: @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts clean-build: ## remove build artifacts rm -fr build/ rm -fr dist/ rm -fr .eggs/ find . -name '*.egg-info' -exec rm -fr {} + find . -name '*.egg' -exec rm -f {} + clean-pyc: ## remove Python file artifacts find . -name '*.pyc' -exec rm -f {} + find . -name '*.pyo' -exec rm -f {} + find . -name '*~' -exec rm -f {} + find . -name '__pycache__' -exec rm -fr {} + clean-test: ## remove test and coverage artifacts rm -fr .tox/ rm -f .coverage rm -fr htmlcov/ lint: ## check style with flake8 flake8 dask_image tests test: ## run tests quickly with the default Python python -m pip install ".[test]" pytest test-all: ## run tests on every Python version with tox tox coverage: ## check code coverage quickly with the default Python coverage run -m pytest coverage report -m coverage html $(BROWSER) htmlcov/index.html docs: ## generate Sphinx HTML documentation, including API docs rm -f docs/dask_image.rst rm -f docs/modules.rst sphinx-apidoc -o docs/ dask_image $(MAKE) -C docs clean $(MAKE) -C docs html $(BROWSER) docs/_build/html/index.html release: clean ## package and upload a release python -m build ls -l dist twine upload dist/* dist: clean ## builds source and wheel package python -m build ls -l dist install: clean ## install the package to the active Python's site-packages python -m pip install . ================================================ FILE: README.rst ================================================ ========== dask-image ========== .. image:: https://img.shields.io/pypi/v/dask-image.svg :target: https://pypi.python.org/pypi/dask-image :alt: PyPI .. image:: https://img.shields.io/conda/vn/conda-forge/dask-image.svg :target: https://anaconda.org/conda-forge/dask-image :alt: conda-forge .. image:: https://github.com/dask/dask-image/actions/workflows/test_and_deploy.yml/badge.svg :target: https://github.com/dask/dask-image/actions/workflows/test_and_deploy.yml :alt: GitHub Actions CI .. image:: https://readthedocs.org/projects/dask-image/badge/?version=latest :target: https://dask-image.readthedocs.io/en/latest/?badge=latest :alt: Read the Docs .. image:: https://coveralls.io/repos/github/dask/dask-image/badge.svg :target: https://coveralls.io/github/dask/dask-image :alt: Coveralls .. image:: https://img.shields.io/github/license/dask/dask-image.svg :target: ./LICENSE.txt :alt: License Distributed image processing * Free software: BSD 3-Clause * Documentation: https://dask-image.readthedocs.io. ================================================ FILE: continuous_integration/environment-3.10.yml ================================================ name: dask-image-testenv channels: - conda-forge dependencies: - python=3.10.* - pip==23.0.1 - coverage==7.2.1 - flake8==6.0.0 - pytest==7.2.2 - pytest-cov==4.0.0 - pytest-flake8==1.3.0 - pytest-timeout >=2.3.1 - dask==2024.4.1 - numpy==1.24.2 - scipy==1.10.1 - scikit-image==0.19.3 - pims==0.6.1 - slicerator==1.1.0 - pandas==2.0.0 - twine==5.0.0 - pip: - build==1.2.1 ================================================ FILE: continuous_integration/environment-3.11.yml ================================================ name: dask-image-testenv channels: - conda-forge dependencies: - python=3.11.* - pip==23.0.1 - coverage==7.2.1 - flake8==6.0.0 - pytest==7.2.2 - pytest-cov==4.0.0 - pytest-flake8==1.3.0 - pytest-timeout >=2.3.1 - dask==2024.4.1 - numpy==1.24.2 - scipy==1.10.1 - scikit-image==0.19.3 - pims==0.6.1 - slicerator==1.1.0 - pandas==2.0.0 - twine==5.0.0 - pip: - build==1.2.1 ================================================ FILE: continuous_integration/environment-3.12.yml ================================================ name: dask-image-testenv channels: - conda-forge dependencies: - python=3.12.* - pip==24.0 - coverage==7.5.1 - flake8==7.0.0 - pytest==8.2.0 - pytest-cov==5.0.0 - pytest-flake8==1.3.0 - pytest-timeout >=2.3.1 - dask==2024.4.1 - numpy==1.26.4 - scipy==1.13.0 - scikit-image==0.22.0 - pims==0.6.1 - slicerator==1.1.0 - pandas==2.2.2 - twine==5.0.0 - pip: - build==1.2.1 ================================================ FILE: continuous_integration/environment-3.9.yml ================================================ name: dask-image-testenv channels: - conda-forge dependencies: - python=3.9.* - pip==24.0 - coverage==7.5.1 - flake8==7.0.0 - pytest==8.2.0 - pytest-cov==5.0.0 - pytest-flake8==1.3.0 - pytest-timeout >=2.3.1 - dask==2024.4.1 - numpy==1.26.4 - scipy==1.13.0 - scikit-image==0.22.0 - pims==0.6.1 - slicerator==1.1.0 - pandas==2.2.2 - twine==5.0.0 - pip: - build==1.2.1 ================================================ FILE: continuous_integration/environment-doc.yml ================================================ name: dask_image_doc_env channels: - conda-forge dependencies: - python=3.9.* - pip==22.3 - jinja2<3.1 - dask==2024.4.1 - numpy==1.23.4 - scipy==1.9.2 - scikit-image==0.19.3 - pims==0.6.1 - slicerator==1.1.0 - pandas==2.0.0 - pip: - build==1.2.1 - .. # install dask_image from this repository source # FIXME: This workaround is required until we have sphinx>=5, as enabled by # dask-sphinx-theme no longer pinning sphinx-book-theme==0.2.0. This is # tracked in https://github.com/dask/dask-sphinx-theme/issues/68. # Once sphinx>=5 is available, we can remove this workaround. - dask-sphinx-theme>=3.0.0 - sphinx>=4.0.0 - sphinxcontrib-applehelp>=1.0.0,<1.0.7 - sphinxcontrib-devhelp>=1.0.0,<1.0.6 - sphinxcontrib-htmlhelp>=2.0.0,<2.0.5 - sphinxcontrib-serializinghtml>=1.1.0,<1.1.10 - sphinxcontrib-qthelp>=1.0.0,<1.0.7 ================================================ FILE: dask_image/__init__.py ================================================ ================================================ FILE: dask_image/dispatch/__init__.py ================================================ ================================================ FILE: dask_image/dispatch/_dispatch_ndfilters.py ================================================ # -*- coding: utf-8 -*- import numpy as np import scipy.ndimage from ._dispatcher import Dispatcher __all__ = [ "dispatch_convolve", "dispatch_correlate", "dispatch_laplace", "dispatch_prewitt", "dispatch_sobel", "dispatch_gaussian_filter", "dispatch_gaussian_gradient_magnitude", "dispatch_gaussian_laplace", "dispatch_generic_filter", "dispatch_minimum_filter", "dispatch_median_filter", "dispatch_maximum_filter", "dispatch_rank_filter", "dispatch_percentile_filter", "dispatch_uniform_filter", "dispatch_threshold_local_mean", ] dispatch_convolve = Dispatcher(name="dispatch_convolve") dispatch_correlate = Dispatcher(name="dispatch_correlate") dispatch_laplace = Dispatcher(name="dispatch_laplace") dispatch_prewitt = Dispatcher(name="dispatch_prewitt") dispatch_sobel = Dispatcher(name="dispatch_sobel") dispatch_gaussian_filter = Dispatcher(name="dispatch_gaussian_filter") dispatch_gaussian_gradient_magnitude = Dispatcher(name="dispatch_gaussian_gradient_magnitude") # noqa: E501 dispatch_gaussian_laplace = Dispatcher(name="dispatch_gaussian_laplace") dispatch_generic_filter = Dispatcher(name="dispatch_generic_filter") dispatch_minimum_filter = Dispatcher(name="dispatch_minimum_filter") dispatch_median_filter = Dispatcher(name="dispatch_median_filter") dispatch_maximum_filter = Dispatcher(name="dispatch_maximum_filter") dispatch_rank_filter = Dispatcher(name="dispatch_rank_filter") dispatch_percentile_filter = Dispatcher(name="dispatch_percentile_filter") dispatch_uniform_filter = Dispatcher(name="dispatch_uniform_filter") dispatch_threshold_local_mean = Dispatcher(name="dispatch_threshold_local_mean") # noqa: E501 # ================== convolve ================== @dispatch_convolve.register(np.ndarray) def numpy_convolve(*args, **kwargs): return scipy.ndimage.convolve @dispatch_convolve.register_lazy("cupy") def register_cupy_convolve(): import cupy import cupyx.scipy.ndimage @dispatch_convolve.register(cupy.ndarray) def cupy_convolve(*args, **kwargs): return cupyx.scipy.ndimage.convolve # ================== correlate ================== @dispatch_correlate.register(np.ndarray) def numpy_correlate(*args, **kwargs): return scipy.ndimage.correlate @dispatch_correlate.register_lazy("cupy") def register_cupy_correlate(): import cupy import cupyx.scipy.ndimage @dispatch_correlate.register(cupy.ndarray) def cupy_correlate(*args, **kwargs): return cupyx.scipy.ndimage.correlate # ================== laplace ================== @dispatch_laplace.register(np.ndarray) def numpy_laplace(*args, **kwargs): return scipy.ndimage.laplace @dispatch_laplace.register_lazy("cupy") def register_cupy_laplace(): import cupy import cupyx.scipy.ndimage @dispatch_laplace.register(cupy.ndarray) def cupy_laplace(*args, **kwargs): return cupyx.scipy.ndimage.laplace # ================== prewitt ================== @dispatch_prewitt.register(np.ndarray) def numpy_prewitt(*args, **kwargs): return scipy.ndimage.prewitt @dispatch_prewitt.register_lazy("cupy") def register_cupy_prewitt(): import cupy import cupyx.scipy.ndimage @dispatch_prewitt.register(cupy.ndarray) def cupy_prewitt(*args, **kwargs): return cupyx.scipy.ndimage.prewitt # ================== sobel ================== @dispatch_sobel.register(np.ndarray) def numpy_sobel(*args, **kwargs): return scipy.ndimage.sobel @dispatch_sobel.register_lazy("cupy") def register_cupy_sobel(): import cupy import cupyx.scipy.ndimage @dispatch_sobel.register(cupy.ndarray) def cupy_sobel(*args, **kwargs): return cupyx.scipy.ndimage.sobel # ================== gaussian_filter ================== @dispatch_gaussian_filter.register(np.ndarray) def numpy_gaussian_filter(*args, **kwargs): return scipy.ndimage.gaussian_filter @dispatch_gaussian_filter.register_lazy("cupy") def register_cupy_gaussian_filter(): import cupy import cupyx.scipy.ndimage @dispatch_gaussian_filter.register(cupy.ndarray) def cupy_gaussian_filter(*args, **kwargs): return cupyx.scipy.ndimage.gaussian_filter # ================== gaussian_gradient_magnitude ================== @dispatch_gaussian_gradient_magnitude.register(np.ndarray) def numpy_gaussian_gradient_magnitude(*args, **kwargs): return scipy.ndimage.gaussian_gradient_magnitude @dispatch_gaussian_gradient_magnitude.register_lazy("cupy") def register_cupy_gaussian_gradient_magnitude(): import cupy import cupyx.scipy.ndimage @dispatch_gaussian_gradient_magnitude.register(cupy.ndarray) def cupy_gaussian_gradient_magnitude(*args, **kwargs): return cupyx.scipy.ndimage.gaussian_gradient_magnitude # ================== gaussian_laplace ================== @dispatch_gaussian_laplace.register(np.ndarray) def numpy_gaussian_laplace(*args, **kwargs): return scipy.ndimage.gaussian_laplace @dispatch_gaussian_laplace.register_lazy("cupy") def register_cupy_gaussian_laplace(): import cupy import cupyx.scipy.ndimage @dispatch_gaussian_laplace.register(cupy.ndarray) def cupy_gaussian_laplace(*args, **kwargs): return cupyx.scipy.ndimage.gaussian_laplace # ================== generic_filter ================== @dispatch_generic_filter.register(np.ndarray) def numpy_generic_filter(*args, **kwargs): return scipy.ndimage.generic_filter @dispatch_generic_filter.register_lazy("cupy") def register_cupy_generic_filter(): import cupy import cupyx.scipy.ndimage @dispatch_generic_filter.register(cupy.ndarray) def cupy_generic_filter(*args, **kwargs): return cupyx.scipy.ndimage.generic_filter # ================== minimum_filter ================== @dispatch_minimum_filter.register(np.ndarray) def numpy_minimum_filter(*args, **kwargs): return scipy.ndimage.minimum_filter @dispatch_minimum_filter.register_lazy("cupy") def register_cupy_minimum_filter(): import cupy import cupyx.scipy.ndimage @dispatch_minimum_filter.register(cupy.ndarray) def cupy_minimum_filter(*args, **kwargs): return cupyx.scipy.ndimage.minimum_filter # ================== median_filter ================== @dispatch_median_filter.register(np.ndarray) def numpy_median_filter(*args, **kwargs): return scipy.ndimage.median_filter @dispatch_median_filter.register_lazy("cupy") def register_cupy_median_filter(): import cupy import cupyx.scipy.ndimage @dispatch_median_filter.register(cupy.ndarray) def cupy_median_filter(*args, **kwargs): return cupyx.scipy.ndimage.median_filter # ================== maximum_filter ================== @dispatch_maximum_filter.register(np.ndarray) def numpy_maximum_filter(*args, **kwargs): return scipy.ndimage.maximum_filter @dispatch_maximum_filter.register_lazy("cupy") def register_cupy_maximum_filter(): import cupy import cupyx.scipy.ndimage @dispatch_maximum_filter.register(cupy.ndarray) def cupy_maximum_filter(*args, **kwargs): return cupyx.scipy.ndimage.maximum_filter # ================== rank_filter ================== @dispatch_rank_filter.register(np.ndarray) def numpy_rank_filter(*args, **kwargs): return scipy.ndimage.rank_filter @dispatch_rank_filter.register_lazy("cupy") def register_cupy_rank_filter(): import cupy import cupyx.scipy.ndimage @dispatch_rank_filter.register(cupy.ndarray) def cupy_rank_filter(*args, **kwargs): return cupyx.scipy.ndimage.rank_filter # ================== percentile_filter ================== @dispatch_percentile_filter.register(np.ndarray) def numpy_percentile_filter(*args, **kwargs): return scipy.ndimage.percentile_filter @dispatch_percentile_filter.register_lazy("cupy") def register_cupy_percentile_filter(): import cupy import cupyx.scipy.ndimage @dispatch_percentile_filter.register(cupy.ndarray) def cupy_percentile_filter(*args, **kwargs): return cupyx.scipy.ndimage.percentile_filter # ================== uniform_filter ================== @dispatch_uniform_filter.register(np.ndarray) def numpy_uniform_filter(*args, **kwargs): return scipy.ndimage.uniform_filter @dispatch_uniform_filter.register_lazy("cupy") def register_cupy_uniform_filter(): import cupy import cupyx.scipy.ndimage @dispatch_uniform_filter.register(cupy.ndarray) def cupy_uniform_filter(*args, **kwargs): return cupyx.scipy.ndimage.uniform_filter # ================== threshold_local_mean ================== @dispatch_threshold_local_mean.register(np.ndarray) def numpy_threshold_local_mean(*args, **kwargs): return np.mean @dispatch_threshold_local_mean.register_lazy("cupy") def register_cupy_threshold_local_mean(): import cupy @dispatch_threshold_local_mean.register(cupy.ndarray) def cupy_threshold_local_mean(*args, **kwargs): # Code snippet taken from https://github.com/cupy/cupy/issues/3909 my_mean = cupy.ReductionKernel( 'T x', # input params 'T y', # output params 'x', # map 'a + b', # reduce 'y = a / _in_ind.size()', # An undocumented variable and a hack '0', # identity value 'mean' # kernel name ) return my_mean ================================================ FILE: dask_image/dispatch/_dispatch_ndinterp.py ================================================ # -*- coding: utf-8 -*- import numpy as np from scipy import ndimage from ._dispatcher import Dispatcher __all__ = [ "dispatch_affine_transform", "dispatch_asarray", ] dispatch_affine_transform = Dispatcher(name="dispatch_affine_transform") # ================== affine_transform ================== @dispatch_affine_transform.register(np.ndarray) def numpy_affine_transform(*args, **kwargs): return ndimage.affine_transform @dispatch_affine_transform.register_lazy("cupy") def register_cupy_affine_transform(): import cupy import cupyx.scipy.ndimage @dispatch_affine_transform.register(cupy.ndarray) def cupy_affine_transform(*args, **kwargs): return cupyx.scipy.ndimage.affine_transform dispatch_spline_filter = Dispatcher(name="dispatch_spline_filter") # ================== spline_filter ================== @dispatch_spline_filter.register(np.ndarray) def numpy_spline_filter(*args, **kwargs): return ndimage.spline_filter @dispatch_spline_filter.register_lazy("cupy") def register_cupy_spline_filter(): import cupy import cupyx.scipy.ndimage @dispatch_spline_filter.register(cupy.ndarray) def cupy_spline_filter(*args, **kwargs): return cupyx.scipy.ndimage.spline_filter dispatch_spline_filter1d = Dispatcher(name="dispatch_spline_filter1d") # ================== spline_filter1d ================== @dispatch_spline_filter1d.register(np.ndarray) def numpy_spline_filter1d(*args, **kwargs): return ndimage.spline_filter1d @dispatch_spline_filter1d.register_lazy("cupy") def register_cupy_spline_filter1d(): import cupy import cupyx.scipy.ndimage @dispatch_spline_filter1d.register(cupy.ndarray) def cupy_spline_filter1d(*args, **kwargs): return cupyx.scipy.ndimage.spline_filter1d dispatch_asarray = Dispatcher(name="dispatch_asarray") # ===================== asarray ======================== @dispatch_asarray.register(np.ndarray) def numpy_asarray(*args, **kwargs): return np.asarray @dispatch_asarray.register_lazy("cupy") def register_cupy_asarray(): import cupy @dispatch_asarray.register(cupy.ndarray) def cupy_asarray(*args, **kwargs): return cupy.asarray ================================================ FILE: dask_image/dispatch/_dispatch_ndmorph.py ================================================ # -*- coding: utf-8 -*- import numpy as np import scipy.ndimage from ._dispatcher import Dispatcher __all__ = [ "dispatch_binary_dilation", "dispatch_binary_erosion", "dispatch_binary_structure", ] dispatch_binary_dilation = Dispatcher(name="dispatch_binary_dilation") dispatch_binary_erosion = Dispatcher(name="dispatch_binary_erosion") dispatch_binary_structure = Dispatcher(name='dispatch_binary_structure') # ================== binary_dilation ================== @dispatch_binary_dilation.register(np.ndarray) def numpy_binary_dilation(*args, **kwargs): return scipy.ndimage.binary_dilation @dispatch_binary_dilation.register_lazy("cupy") def register_cupy_binary_dilation(): import cupy import cupyx.scipy.ndimage @dispatch_binary_dilation.register(cupy.ndarray) def cupy_binary_dilation(*args, **kwargs): return cupyx.scipy.ndimage.binary_dilation # ================== binary_erosion ================== @dispatch_binary_erosion.register(np.ndarray) def numpy_binary_erosion(*args, **kwargs): return scipy.ndimage.binary_erosion @dispatch_binary_erosion.register_lazy("cupy") def register_cupy_binary_erosion(): import cupy import cupyx.scipy.ndimage @dispatch_binary_erosion.register(cupy.ndarray) def cupy_binary_erosion(*args, **kwargs): return cupyx.scipy.ndimage.binary_erosion # ================== generate_binary_structure ================== @dispatch_binary_structure.register(np.ndarray) def numpy_binary_structure(*args, **kwargs): return scipy.ndimage.generate_binary_structure @dispatch_binary_structure.register_lazy("cupy") def register_cupy_binary_structure(): import cupy import cupyx.scipy.ndimage @dispatch_binary_structure.register(cupy.ndarray) def cupy_binary_structure(*args, **kwargs): return cupyx.scipy.ndimage.generate_binary_structure ================================================ FILE: dask_image/dispatch/_dispatcher.py ================================================ # -*- coding: utf-8 -*- from dask.utils import Dispatch def get_type(array): """Return type of arrays contained within the dask array chunks.""" try: datatype = type(array._meta) # Check chunk type backing dask array except AttributeError: datatype = type(array) # For all non-dask arrays return datatype class Dispatcher(Dispatch): """Simple single dispatch for different dask array types.""" def __call__(self, arg, *args, **kwargs): """ Call the corresponding method based on type of dask array. """ datatype = get_type(arg) meth = self.dispatch(datatype) return meth(arg, *args, **kwargs) ================================================ FILE: dask_image/dispatch/_utils.py ================================================ # -*- coding: utf-8 -*- from ._dispatcher import get_type __all__ = [ "check_arraytypes_compatible", ] def check_arraytypes_compatible(*args): """Check array types are compatible. For arrays to be compatible they must either have the same type, or a dask array where the chunks match the same array type. Examples of compatible arrays: * Two (or more) numpy arrays * A dask array with numpy chunks, and a numpy array Examples of incompatible arrays: * A numpy array and a cupy array """ arraytypes = [get_type(arg) for arg in args] if len(set(arraytypes)) != 1: raise ValueError("Array types must be compatible.") ================================================ FILE: dask_image/imread/__init__.py ================================================ # -*- coding: utf-8 -*- import glob import numbers import warnings import dask.array as da import numpy as np import pims from tifffile import natural_sorted def imread(fname, nframes=1, *, arraytype="numpy"): """ Read image data into a Dask Array. Provides a simple, fast mechanism to ingest image data into a Dask Array. This uses the `pims` package to open images. Parameters ---------- fname : str or pathlib.Path A glob like string that may match one or multiple filenames. Where multiple filenames match, they are sorted using natural (as opposed to alphabetical) sort. nframes : int, optional Number of the frames to include in each chunk (default: 1). arraytype : str, optional Array type for dask chunks. Available options: "numpy", "cupy". Returns ------- array : dask.array.Array A Dask Array representing the contents of all image files. Warnings -------- There are several known issues with this function, and users are recommended to use `dask.array.image.imread` or `bioio` instead. """ sfname = str(fname) if not isinstance(nframes, numbers.Integral): raise ValueError("`nframes` must be an integer.") if (nframes != -1) and not (nframes > 0): raise ValueError("`nframes` must be greater than zero.") if arraytype == "numpy": arrayfunc = np.asanyarray elif arraytype == "cupy": # pragma: no cover import cupy arrayfunc = cupy.asanyarray with pims.open(sfname) as imgs: shape = (len(imgs),) + imgs.frame_shape dtype = np.dtype(imgs.pixel_type) if nframes == -1: nframes = shape[0] if nframes > shape[0]: warnings.warn( "`nframes` larger than number of frames in file." " Will truncate to number of frames in file.", RuntimeWarning ) elif shape[0] % nframes != 0: warnings.warn( "`nframes` does not nicely divide number of frames in file." " Last chunk will contain the remainder.", RuntimeWarning ) # place source filenames into dask array after sorting filenames = natural_sorted(glob.glob(sfname)) if len(filenames) > 1: ar = da.from_array(filenames, chunks=(nframes,)) multiple_files = True else: ar = da.from_array(filenames * shape[0], chunks=(nframes,)) multiple_files = False # read in data using encoded filenames a = ar.map_blocks( _map_read_frame, chunks=da.core.normalize_chunks( (nframes,) + shape[1:], shape), multiple_files=multiple_files, new_axis=list(range(1, len(shape))), arrayfunc=arrayfunc, meta=arrayfunc([]).astype(dtype), # meta overwrites `dtype` argument ) return a def _map_read_frame(x, multiple_files, block_info=None, **kwargs): fn = x[0] # get filename from input chunk if multiple_files: i, j = 0, 1 else: i, j = block_info[None]['array-location'][0] return _read_frame(fn=fn, i=slice(i, j), **kwargs) def _read_frame(fn, i, *, arrayfunc=np.asanyarray): with pims.open(fn) as imgs: return arrayfunc(imgs[i]) ================================================ FILE: dask_image/ndfilters/__init__.py ================================================ # -*- coding: utf-8 -*- __all__ = [ "convolve", "correlate", "laplace", "prewitt", "sobel", "gaussian", "gaussian_filter", "gaussian_gradient_magnitude", "gaussian_laplace", "generic_filter", "minimum_filter", "median_filter", "maximum_filter", "rank_filter", "percentile_filter", "uniform_filter", "threshold_local", ] from ._conv import convolve, correlate from ._diff import laplace from ._edge import prewitt, sobel from ._gaussian import (gaussian, gaussian_filter, gaussian_gradient_magnitude, gaussian_laplace) from ._generic import generic_filter from ._order import (maximum_filter, median_filter, minimum_filter, percentile_filter, rank_filter) from ._smooth import uniform_filter from ._threshold import threshold_local convolve.__module__ = __name__ correlate.__module__ = __name__ laplace.__module__ = __name__ prewitt.__module__ = __name__ sobel.__module__ = __name__ gaussian.__module__ = __name__ gaussian_filter.__module__ = __name__ gaussian_gradient_magnitude.__module__ = __name__ gaussian_laplace.__module__ = __name__ generic_filter.__module__ = __name__ minimum_filter.__module__ = __name__ median_filter.__module__ = __name__ maximum_filter.__module__ = __name__ rank_filter.__module__ = __name__ percentile_filter.__module__ = __name__ uniform_filter.__module__ = __name__ threshold_local.__module__ = __name__ ================================================ FILE: dask_image/ndfilters/_conv.py ================================================ # -*- coding: utf-8 -*- import scipy.ndimage from ..dispatch._dispatch_ndfilters import (dispatch_convolve, dispatch_correlate) from ..dispatch._utils import check_arraytypes_compatible from . import _utils __all__ = [ "convolve", "correlate", ] @_utils._update_wrapper(scipy.ndimage.convolve) def convolve(image, weights, mode="reflect", cval=0.0, origin=0): check_arraytypes_compatible(image, weights) origin = _utils._get_origin(weights.shape, origin) depth = _utils._get_depth(weights.shape, origin) depth, boundary = _utils._get_depth_boundary(image.ndim, depth, "none") if mode == "wrap": # Fixes https://github.com/dask/dask-image/issues/242 boundary = "periodic" mode = "constant" result = image.map_overlap( dispatch_convolve(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, weights=weights, mode=mode, cval=cval, origin=origin, ) return result @_utils._update_wrapper(scipy.ndimage.correlate) def correlate(image, weights, mode="reflect", cval=0.0, origin=0): check_arraytypes_compatible(image, weights) origin = _utils._get_origin(weights.shape, origin) depth = _utils._get_depth(weights.shape, origin) depth, boundary = _utils._get_depth_boundary(image.ndim, depth, "none") if mode == "wrap": # Fixes https://github.com/dask/dask-image/issues/242 boundary = "periodic" mode = "constant" result = image.map_overlap( dispatch_correlate(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, weights=weights, mode=mode, cval=cval, origin=origin, ) return result ================================================ FILE: dask_image/ndfilters/_diff.py ================================================ # -*- coding: utf-8 -*- import scipy.ndimage from ..dispatch._dispatch_ndfilters import dispatch_laplace from . import _utils __all__ = [ "laplace", ] @_utils._update_wrapper(scipy.ndimage.laplace) def laplace(image, mode='reflect', cval=0.0): result = image.map_overlap( dispatch_laplace(image), depth=(image.ndim * (1,)), boundary="none", dtype=image.dtype, meta=image._meta, mode=mode, cval=cval ) return result ================================================ FILE: dask_image/ndfilters/_edge.py ================================================ # -*- coding: utf-8 -*- import numbers import scipy.ndimage from ..dispatch._dispatch_ndfilters import dispatch_prewitt, dispatch_sobel from . import _utils __all__ = [ "prewitt", "sobel", ] def _validate_axis(ndim, axis): if not isinstance(axis, numbers.Integral): raise ValueError("The axis must be of integral type.") if axis < -ndim or axis >= ndim: raise ValueError("The axis is out of range.") @_utils._update_wrapper(scipy.ndimage.prewitt) def prewitt(image, axis=-1, mode='reflect', cval=0.0): _validate_axis(image.ndim, axis) result = image.map_overlap( dispatch_prewitt(image), depth=(image.ndim * (1,)), boundary="none", dtype=image.dtype, meta=image._meta, axis=axis, mode=mode, cval=cval ) return result @_utils._update_wrapper(scipy.ndimage.sobel) def sobel(image, axis=-1, mode='reflect', cval=0.0): _validate_axis(image.ndim, axis) result = image.map_overlap( dispatch_sobel(image), depth=(image.ndim * (1,)), boundary="none", dtype=image.dtype, meta=image._meta, axis=axis, mode=mode, cval=cval ) return result ================================================ FILE: dask_image/ndfilters/_gaussian.py ================================================ # -*- coding: utf-8 -*- import numbers import numpy as np import scipy.ndimage from ..dispatch._dispatch_ndfilters import ( dispatch_gaussian_filter, dispatch_gaussian_gradient_magnitude, dispatch_gaussian_laplace) from . import _utils __all__ = [ "gaussian_filter", "gaussian_gradient_magnitude", "gaussian_laplace", "gaussian" ] def _get_sigmas(image, sigma): ndim = image.ndim nsigmas = np.array(sigma) if nsigmas.ndim == 0: nsigmas = np.array(ndim * [nsigmas[()]]) if nsigmas.ndim != 1: raise RuntimeError( "Must have a single sigma or a single sequence." ) if ndim != len(nsigmas): raise RuntimeError( "Must have an equal number of sigmas to image dimensions." ) if not issubclass(nsigmas.dtype.type, numbers.Real): raise TypeError("Must have real sigmas.") nsigmas = tuple(nsigmas) return nsigmas def _get_border(image, sigma, truncate): sigma = np.array(_get_sigmas(image, sigma)) if not isinstance(truncate, numbers.Real): raise TypeError("Must have a real truncate value.") half_shape = tuple(np.ceil(sigma * truncate).astype(int)) return half_shape @_utils._update_wrapper(scipy.ndimage.gaussian_filter) def gaussian_filter(image, sigma, order=0, mode='reflect', cval=0.0, truncate=4.0): sigma = _get_sigmas(image, sigma) depth = _get_border(image, sigma, truncate) depth, boundary = _utils._get_depth_boundary(image.ndim, depth, "none") result = image.map_overlap( dispatch_gaussian_filter(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, sigma=sigma, order=order, mode=mode, cval=cval, truncate=truncate ) return result def gaussian(image, sigma, order=0, mode='reflect', cval=0.0, truncate=4.0): """Alias of `dask_image.ndfilters.gaussian_filter`.""" return gaussian_filter(image, sigma, order=order, mode=mode, cval=cval, truncate=truncate) @_utils._update_wrapper(scipy.ndimage.gaussian_gradient_magnitude) def gaussian_gradient_magnitude(image, sigma, mode='reflect', cval=0.0, truncate=4.0, **kwargs): sigma = _get_sigmas(image, sigma) depth = _get_border(image, sigma, truncate) depth, boundary = _utils._get_depth_boundary(image.ndim, depth, "none") result = image.map_overlap( dispatch_gaussian_gradient_magnitude(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, sigma=sigma, mode=mode, cval=cval, truncate=truncate, **kwargs ) return result @_utils._update_wrapper(scipy.ndimage.gaussian_laplace) def gaussian_laplace(image, sigma, mode='reflect', cval=0.0, truncate=4.0, **kwargs): sigma = _get_sigmas(image, sigma) depth = _get_border(image, sigma, truncate) depth, boundary = _utils._get_depth_boundary(image.ndim, depth, "none") result = image.map_overlap( dispatch_gaussian_laplace(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, sigma=sigma, mode=mode, cval=cval, truncate=truncate, **kwargs ) return result ================================================ FILE: dask_image/ndfilters/_generic.py ================================================ # -*- coding: utf-8 -*- import numpy as np import scipy.ndimage from ..dispatch._dispatch_ndfilters import dispatch_generic_filter from . import _utils __all__ = [ "generic_filter", ] @_utils._update_wrapper(scipy.ndimage.generic_filter) def generic_filter(image, function, size=None, footprint=None, mode='reflect', cval=0.0, origin=0, extra_arguments=tuple(), extra_keywords=dict()): footprint = _utils._get_footprint(image.ndim, size, footprint) origin = _utils._get_origin(footprint.shape, origin) depth = _utils._get_depth(footprint.shape, origin) depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, "none") if type(image._meta) is np.ndarray: kwargs = {"extra_arguments": extra_arguments, "extra_keywords": extra_keywords} else: # pragma: no cover # cupy generic_filter doesn't support extra_arguments or extra_keywords kwargs = {} result = image.map_overlap( dispatch_generic_filter(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, function=function, footprint=footprint, mode=mode, cval=cval, origin=origin, **kwargs ) return result ================================================ FILE: dask_image/ndfilters/_order.py ================================================ # -*- coding: utf-8 -*- import scipy.ndimage from ..dispatch._dispatch_ndfilters import (dispatch_maximum_filter, dispatch_median_filter, dispatch_minimum_filter, dispatch_percentile_filter, dispatch_rank_filter) from . import _utils __all__ = [ "minimum_filter", "median_filter", "maximum_filter", "rank_filter", "percentile_filter", ] @_utils._update_wrapper(scipy.ndimage.minimum_filter) def minimum_filter(image, size=None, footprint=None, mode='reflect', cval=0.0, origin=0): footprint = _utils._get_footprint(image.ndim, size, footprint) origin = _utils._get_origin(footprint.shape, origin) depth = _utils._get_depth(footprint.shape, origin) depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, "none") result = image.map_overlap( dispatch_minimum_filter(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, footprint=footprint, mode=mode, cval=cval, origin=origin ) return result @_utils._update_wrapper(scipy.ndimage.median_filter) def median_filter(image, size=None, footprint=None, mode='reflect', cval=0.0, origin=0): footprint = _utils._get_footprint(image.ndim, size, footprint) origin = _utils._get_origin(footprint.shape, origin) depth = _utils._get_depth(footprint.shape, origin) depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, "none") result = image.map_overlap( dispatch_median_filter(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, footprint=footprint, mode=mode, cval=cval, origin=origin ) return result @_utils._update_wrapper(scipy.ndimage.maximum_filter) def maximum_filter(image, size=None, footprint=None, mode='reflect', cval=0.0, origin=0): footprint = _utils._get_footprint(image.ndim, size, footprint) origin = _utils._get_origin(footprint.shape, origin) depth = _utils._get_depth(footprint.shape, origin) depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, "none") result = image.map_overlap( dispatch_maximum_filter(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, footprint=footprint, mode=mode, cval=cval, origin=origin ) return result @_utils._update_wrapper(scipy.ndimage.rank_filter) def rank_filter(image, rank, size=None, footprint=None, mode='reflect', cval=0.0, origin=0): footprint = _utils._get_footprint(image.ndim, size, footprint) origin = _utils._get_origin(footprint.shape, origin) depth = _utils._get_depth(footprint.shape, origin) depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, "none") result = image.map_overlap( dispatch_rank_filter(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, rank=rank, footprint=footprint, mode=mode, cval=cval, origin=origin ) return result @_utils._update_wrapper(scipy.ndimage.percentile_filter) def percentile_filter(image, percentile, size=None, footprint=None, mode='reflect', cval=0.0, origin=0): footprint = _utils._get_footprint(image.ndim, size, footprint) origin = _utils._get_origin(footprint.shape, origin) depth = _utils._get_depth(footprint.shape, origin) depth, boundary = _utils._get_depth_boundary(footprint.ndim, depth, "none") result = image.map_overlap( dispatch_percentile_filter(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, percentile=percentile, footprint=footprint, mode=mode, cval=cval, origin=origin ) return result ================================================ FILE: dask_image/ndfilters/_smooth.py ================================================ # -*- coding: utf-8 -*- import scipy.ndimage from ..dispatch._dispatch_ndfilters import dispatch_uniform_filter from . import _utils from ._gaussian import gaussian_filter __all__ = [ "uniform_filter", ] gaussian_filter = gaussian_filter @_utils._update_wrapper(scipy.ndimage.uniform_filter) def uniform_filter(image, size=3, mode='reflect', cval=0.0, origin=0): size = _utils._get_size(image.ndim, size) depth = _utils._get_depth(size, origin) depth, boundary = _utils._get_depth_boundary(image.ndim, depth, "none") result = image.map_overlap( dispatch_uniform_filter(image), depth=depth, boundary=boundary, dtype=image.dtype, meta=image._meta, size=size, mode=mode, cval=cval, origin=origin ) return result ================================================ FILE: dask_image/ndfilters/_threshold.py ================================================ import numpy as np from ..dispatch._dispatch_ndfilters import dispatch_threshold_local_mean from . import _gaussian, _generic, _order __all__ = [ "threshold_local", ] def threshold_local(image, block_size, method='gaussian', offset=0, mode='reflect', param=None, cval=0): """Compute a threshold mask image based on local pixel neighborhood. Also known as adaptive or dynamic thresholding[1]_. The threshold value is the weighted mean for the local neighborhood of a pixel subtracted by a constant. Alternatively the threshold can be determined dynamically by a given function, using the 'generic' method. Parameters ---------- image : (N, M) dask ndarray Input image. block_size : int or list/tuple/array Size of pixel neighborhood which is used to calculate the threshold value. (1) A single value for use in all dimensions or (2) A tuple, list, or array with length equal to image.ndim method : {'generic', 'gaussian', 'mean', 'median'}, optional Method used to determine adaptive threshold for local neighbourhood in weighted mean image. * 'generic': use custom function (see `param` parameter) * 'gaussian': apply gaussian filter (see `param` parameter for custom\ sigma value) * 'mean': apply arithmetic mean filter * 'median': apply median rank filter By default the 'gaussian' method is used. offset : float, optional Constant subtracted from weighted mean of neighborhood to calculate the local threshold value. Default offset is 0. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The mode parameter determines how the array borders are handled, where cval is the value when mode is equal to 'constant'. Default is 'reflect'. param : {int, function}, optional Either specify sigma for 'gaussian' method or function object for 'generic' method. This functions takes the flat array of local neighbourhood as a single argument and returns the calculated threshold for the centre pixel. cval : float, optional Value to fill past edges of input if mode is 'constant'. Returns ------- threshold : (N, M) dask ndarray Threshold image. All pixels in the input image higher than the corresponding pixel in the threshold image are considered foreground. References ---------- .. [1] https://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold Examples -------- >>> import dask.array as da >>> image = da.random.random((1000, 1000), chunks=(100, 100)) >>> result = threshold_local(image, 15, 'gaussian') """ # noqa image = image.astype(np.float64) if method == 'generic': if not callable(param): raise ValueError("Must include a valid function to use as the " "'param' keyword argument.") thresh_image = _generic.generic_filter(image, param, block_size, mode=mode, cval=cval) elif method == 'gaussian': if param is None: sigma = (np.array(block_size).astype(float) - 1) / 6.0 else: sigma = param thresh_image = _gaussian.gaussian_filter(image, sigma, mode=mode, cval=cval) elif method == 'mean': thresh_image = _generic.generic_filter( image, dispatch_threshold_local_mean(image), block_size, mode=mode, cval=cval) elif method == 'median': thresh_image = _order.median_filter(image, block_size, mode=mode, cval=cval) else: raise ValueError("Invalid method specified. Please use `generic`, " "`gaussian`, `mean`, or `median`.") return thresh_image - offset ================================================ FILE: dask_image/ndfilters/_utils.py ================================================ # -*- coding: utf-8 -*- import collections import inspect import numbers import re import numpy as np def _get_docstring(func): # Drop the output parameter from the docstring. split_doc_params = lambda s: re.subn( # noqa: E731 "( [A-Za-z]+ : )", "\0\\1", s)[0].split("\0") drop_doc_param = lambda s: not s.startswith(" output : ") # noqa: E731 func_doc = "" if func.__doc__ is None else func.__doc__ cleaned_docstring = "".join([ l for l in split_doc_params(func_doc) if drop_doc_param(l) # noqa: E741, E501 ]) cleaned_docstring = cleaned_docstring.replace('input', 'image') cleaned_docstring = cleaned_docstring.replace('labels', 'label_image') cleaned_docstring = cleaned_docstring.split('Examples')[0].strip() docstring = """ Wrapped copy of "{mod_name}.{func_name}" Excludes the output parameter as it would not work with Dask arrays. Original docstring: {doc} """.format( mod_name=inspect.getmodule(func).__name__, func_name=func.__name__, doc=cleaned_docstring, ) return docstring def _update_wrapper(func): def _updater(wrapper): wrapper.__name__ = func.__name__ wrapper.__doc__ = _get_docstring(func) return wrapper return _updater def _get_depth_boundary(ndim, depth, boundary=None): strlike = (bytes, str) if not isinstance(ndim, numbers.Integral): raise TypeError("Expected integer value for `ndim`.") if ndim <= 0: raise ValueError("Expected positive value for `ndim`.") if isinstance(depth, numbers.Number): depth = ndim * (depth,) if not isinstance(depth, collections.abc.Sized): raise TypeError("Unexpected type for `depth`.") if len(depth) != ndim: raise ValueError("Expected `depth` to have a length equal to `ndim`.") if isinstance(depth, collections.abc.Sequence): depth = dict(zip(range(ndim), depth)) if not isinstance(depth, collections.abc.Mapping): raise TypeError("Unexpected type for `depth`.") if not all(map(lambda d: isinstance(d, numbers.Integral), depth.values())): raise TypeError("Expected integer values for `depth`.") if not all(map(lambda d: d >= 0, depth.values())): raise ValueError("Expected positive semidefinite values for `depth`.") depth = dict([(a, int(d)) for a, d in depth.items()]) if (boundary is None) or isinstance(boundary, strlike): boundary = ndim * (boundary,) if not isinstance(boundary, collections.abc.Sized): raise TypeError("Unexpected type for `boundary`.") if len(boundary) != ndim: raise ValueError( "Expected `boundary` to have a length equal to `ndim`." ) if isinstance(boundary, collections.abc.Sequence): boundary = dict(zip(range(ndim), boundary)) if not isinstance(boundary, collections.abc.Mapping): raise TypeError("Unexpected type for `boundary`.") type_check = lambda b: (b is None) or isinstance(b, strlike) # noqa: E731 if not all(map(type_check, boundary.values())): raise TypeError("Expected string-like values for `boundary`.") return depth, boundary def _get_size(ndim, size): if not isinstance(ndim, numbers.Integral): raise TypeError("The ndim must be of integral type.") if isinstance(size, numbers.Number): size = ndim * (size,) size = np.array(size) if size.ndim != 1: raise RuntimeError("The size must have only one dimension.") if len(size) != ndim: raise RuntimeError( "The size must have a length equal to the number of dimensions." ) if not issubclass(size.dtype.type, numbers.Integral): raise TypeError("The size must be of integral type.") size = tuple(size) return size def _get_origin(size, origin=0): size = np.array(size) ndim = len(size) if isinstance(origin, numbers.Number): origin = ndim * (origin,) origin = np.array(origin) if not issubclass(origin.dtype.type, numbers.Integral): raise TypeError("The origin must be of integral type.") # Validate dimensions. if origin.ndim != 1: raise RuntimeError("The origin must have only one dimension.") if len(origin) != ndim: raise RuntimeError( "The origin must have the same length as the number of dimensions" " as the array being filtered." ) # Validate origin is bounded. if not (origin < ((size + 1) // 2)).all(): raise ValueError("The origin must be within the footprint.") origin = tuple(origin) return origin def _get_depth(size, origin=0): origin = np.array(_get_origin(size, origin)) size = np.array(size) half_size = size // 2 depth = half_size + abs(origin) depth = tuple(depth) return depth def _get_footprint(ndim, size=None, footprint=None): # Verify that we only got size or footprint. if size is None and footprint is None: raise RuntimeError("Must provide either size or footprint.") if size is not None and footprint is not None: raise RuntimeError("Provide either size or footprint, but not both.") # Get a footprint based on the size. if size is not None: size = _get_size(ndim, size) footprint = np.ones(size, dtype=bool) # Validate the footprint. if footprint.ndim != ndim: raise RuntimeError( "The footprint must have the same number of dimensions as" " the array being filtered." ) if footprint.size == 0: raise RuntimeError("The footprint must have only non-zero dimensions.") # Convert to Boolean. footprint = (footprint != 0) return footprint ================================================ FILE: dask_image/ndfourier/__init__.py ================================================ # -*- coding: utf-8 -*- import numbers import dask.array as da from . import _utils __all__ = [ "fourier_gaussian", "fourier_shift", "fourier_uniform", ] def fourier_gaussian(image, sigma, n=-1, axis=-1): """ Multi-dimensional Gaussian fourier filter. The array is multiplied with the fourier transform of a Gaussian kernel. Parameters ---------- image : array_like The input image. sigma : float or sequence The sigma of the Gaussian kernel. If a float, `sigma` is the same for all axes. If a sequence, `sigma` has to contain one value for each axis. n : int, optional If `n` is negative (default), then the image is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the image is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. Returns ------- fourier_gaussian : Dask Array Examples -------- >>> from scipy import ndimage, misc >>> import numpy.fft >>> import matplotlib.pyplot as plt >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = misc.ascent() >>> image = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_gaussian(image, sigma=4) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) """ # Validate and normalize arguments image, sigma, n, axis = _utils._norm_args(image, sigma, n=n, axis=axis) # Compute frequencies ang_freq_grid = _utils._get_ang_freq_grid( image.shape, chunks=image.chunks, n=n, axis=axis, dtype=sigma.dtype ) # Compute Fourier transformed Gaussian result = image.copy() scale = (sigma ** 2) / -2 for ax, f in enumerate(ang_freq_grid): f *= f gaussian = da.exp(scale[ax] * f) gaussian = _utils._reshape_nd(gaussian, ndim=image.ndim, axis=ax) result *= gaussian return result def fourier_shift(image, shift, n=-1, axis=-1): """ Multi-dimensional fourier shift filter. The array is multiplied with the fourier transform of a shift operation. Parameters ---------- image : array_like The input image. shift : float or sequence The size of the box used for filtering. If a float, `shift` is the same for all axes. If a sequence, `shift` has to contain one value for each axis. n : int, optional If `n` is negative (default), then the image is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the image is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. Returns ------- fourier_shift : Dask Array Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> import numpy.fft >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = misc.ascent() >>> image = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_shift(image, shift=200) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) >>> ax2.imshow(result.real) # the imaginary part is an artifact >>> plt.show() """ if issubclass(image.dtype.type, numbers.Real): image = image.astype(complex) # Validate and normalize arguments image, shift, n, axis = _utils._norm_args(image, shift, n=n, axis=axis) # Constants with type converted J = image.dtype.type(1j) # Get the grid of frequencies ang_freq_grid = _utils._get_ang_freq_grid( image.shape, chunks=image.chunks, n=n, axis=axis, dtype=shift.dtype ) # Apply shift result = image.copy() for ax, f in enumerate(ang_freq_grid): phase_shift = da.exp((-J) * shift[ax] * f) phase_shift = _utils._reshape_nd(phase_shift, ndim=image.ndim, axis=ax) result *= phase_shift return result def fourier_uniform(image, size, n=-1, axis=-1): """ Multi-dimensional uniform fourier filter. The array is multiplied with the fourier transform of a box of given size. Parameters ---------- image : array_like The input image. size : float or sequence The size of the box used for filtering. If a float, `size` is the same for all axes. If a sequence, `size` has to contain one value for each axis. n : int, optional If `n` is negative (default), then the image is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the image is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. Returns ------- fourier_uniform : Dask Array The filtered image. If `output` is given as a parameter, None is returned. Examples -------- >>> from scipy import ndimage, misc >>> import numpy.fft >>> import matplotlib.pyplot as plt >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = misc.ascent() >>> image = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_uniform(image, size=20) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) >>> ax2.imshow(result.real) # the imaginary part is an artifact >>> plt.show() """ # Validate and normalize arguments image, size, n, axis = _utils._norm_args(image, size, n=n, axis=axis) # Get the grid of frequencies freq_grid = _utils._get_freq_grid( image.shape, chunks=image.chunks, n=n, axis=axis, dtype=size.dtype ) # Compute uniform filter result = image.copy() for ax, f in enumerate(freq_grid): uniform = da.sinc(size[ax] * f) uniform = _utils._reshape_nd(uniform, ndim=image.ndim, axis=ax) result *= uniform return result ================================================ FILE: dask_image/ndfourier/_utils.py ================================================ # -*- coding: utf-8 -*- import numbers import dask.array as da import numpy as np def _get_freq_grid(shape, chunks, axis, n, dtype=float): assert len(shape) == len(chunks) shape = tuple(shape) dtype = np.dtype(dtype).type assert (issubclass(dtype, numbers.Real) and not issubclass(dtype, numbers.Integral)) axis = axis % len(shape) freq_grid = [] for ax, (s, c) in enumerate(zip(shape, chunks)): if axis == ax and n > 0: f = da.fft.rfftfreq(n, chunks=c).astype(dtype) else: f = da.fft.fftfreq(s, chunks=c).astype(dtype) freq_grid.append(f) freq_grid = da.meshgrid(*freq_grid, indexing="ij", sparse=True) return freq_grid def _get_ang_freq_grid(shape, chunks, axis, n, dtype=float): dtype = np.dtype(dtype).type assert (issubclass(dtype, numbers.Real) and not issubclass(dtype, numbers.Integral)) pi = dtype(np.pi) freq_grid = _get_freq_grid(shape, chunks, axis, n, dtype=dtype) ang_freq_grid = tuple((2 * pi) * f for f in freq_grid) return ang_freq_grid def _norm_args(a, s, n=-1, axis=-1): if issubclass(a.dtype.type, numbers.Integral): a = a.astype(float) if isinstance(s, numbers.Number): s = np.array(a.ndim * [s]) elif not isinstance(s, da.Array): s = np.array(s) if issubclass(s.dtype.type, numbers.Integral): s = s.astype(a.real.dtype) elif not issubclass(s.dtype.type, numbers.Real): raise TypeError("The `s` must contain real value(s).") if s.shape != (a.ndim,): raise RuntimeError( "Shape of `s` must be 1-D and equal to the input's rank." ) if n != -1 and a.shape[axis] != (n // 2 + 1): raise NotImplementedError( "In the case of real-valued images, it is required that " "(n // 2 + 1) == image.shape[axis]." ) return (a, s, n, axis) def _reshape_nd(arr, ndim, axis): """Promote a 1d array to ndim with non-singleton size along axis.""" nd_shape = (1,) * axis + (arr.size,) + (1,) * (ndim - axis - 1) return arr.reshape(nd_shape) ================================================ FILE: dask_image/ndinterp/__init__.py ================================================ __all__ = [ "affine_transform", "map_coordinates", "rotate", "spline_filter", "spline_filter1d", ] from ._affine_transform import affine_transform from ._map_coordinates import map_coordinates from ._rotate import rotate from ._spline_filters import spline_filter, spline_filter1d affine_transform.__module__ == __name__ map_coordinates.__module__ == __name__ rotate.__module__ == __name__ spline_filter.__module__ == __name__ spline_filter1d.__module__ == __name__ ================================================ FILE: dask_image/ndinterp/_affine_transform.py ================================================ # -*- coding: utf-8 -*- from itertools import product import dask.array as da import numpy as np from dask.base import tokenize from dask.highlevelgraph import HighLevelGraph import scipy from scipy.ndimage import affine_transform as ndimage_affine_transform from ._spline_filters import spline_filter from ..dispatch._dispatch_ndinterp import ( dispatch_affine_transform, dispatch_asarray, ) def affine_transform( image, matrix, offset=0.0, output_shape=None, order=1, output_chunks=None, **kwargs ): """Apply an affine transform using Dask. For every output chunk, only the slice containing the relevant part of the image is processed. Chunkwise processing is performed either using `ndimage.affine_transform` or `cupyx.scipy.ndimage.affine_transform`, depending on the input type. Notes ----- Differences to `ndimage.affine_transformation`: - currently, prefiltering is not supported (affecting the output in case of interpolation `order > 1`) - default order is 1 - modes 'reflect', 'mirror' and 'wrap' are not supported Arguments equal to `ndimage.affine_transformation`, except for `output_chunks`. Parameters ---------- image : array_like (Numpy Array, Cupy Array, Dask Array...) The image array. matrix : array (ndim,), (ndim, ndim), (ndim, ndim+1) or (ndim+1, ndim+1) Transformation matrix. offset : float or sequence, optional The offset into the array where the transform is applied. If a float, `offset` is the same for each axis. If a sequence, `offset` should contain one value for each axis. output_shape : tuple of ints, optional The shape of the array to be returned. order : int, optional The order of the spline interpolation. Note that for order>1 scipy's affine_transform applies prefiltering, which is not yet supported and skipped in this implementation. output_chunks : tuple of ints, optional The shape of the chunks of the output Dask Array. Returns ------- affine_transform : Dask Array A dask array representing the transformed output """ if not isinstance(image, da.core.Array): image = da.from_array(image) if output_shape is None: output_shape = image.shape if output_chunks is None: output_chunks = image.shape # Perform test run to ensure parameter validity. ndimage_affine_transform(np.zeros([0] * image.ndim), matrix, offset) # Make sure parameters contained in matrix and offset # are not overlapping, i.e. that the offset is valid as # it needs to be modified for each chunk. # Further parameter checks are performed directly by # `ndimage.affine_transform`. matrix = np.asarray(matrix) offset = np.asarray(offset).squeeze() # these lines were copied and adapted from `ndimage.affine_transform` if (matrix.ndim == 2 and matrix.shape[1] == image.ndim + 1 and (matrix.shape[0] in [image.ndim, image.ndim + 1])): # assume input is homogeneous coordinate transformation matrix offset = matrix[:image.ndim, image.ndim] matrix = matrix[:image.ndim, :image.ndim] cval = kwargs.pop('cval', 0) mode = kwargs.pop('mode', 'constant') prefilter = kwargs.pop('prefilter', False) supported_modes = ['constant', 'nearest'] if scipy.__version__ > np.lib.NumpyVersion('1.6.0'): supported_modes += ['grid-constant'] if mode in ['wrap', 'reflect', 'mirror', 'grid-mirror', 'grid-wrap']: raise NotImplementedError( f"Mode {mode} is not currently supported. It must be one of " f"{supported_modes}.") # process kwargs if prefilter and order > 1: # prefilter is not yet supported for all modes if mode in ['nearest', 'grid-constant']: raise NotImplementedError( f"order > 1 with mode='{mode}' is not supported. Currently " f"prefilter is only supported with mode='constant'." ) image = spline_filter(image, order, output=np.float64, mode=mode) n = image.ndim image_shape = image.shape # calculate output array properties normalized_chunks = da.core.normalize_chunks(output_chunks, tuple(output_shape)) block_indices = product(*(range(len(bds)) for bds in normalized_chunks)) block_offsets = [np.cumsum((0,) + bds[:-1]) for bds in normalized_chunks] # use dispatching mechanism to determine backend affine_transform_method = dispatch_affine_transform(image) asarray_method = dispatch_asarray(image) # construct dask graph for output array # using unique and deterministic identifier output_name = 'affine_transform-' + tokenize(image, matrix, offset, output_shape, output_chunks, kwargs) output_layer = {} rel_images = [] for ib, block_ind in enumerate(block_indices): out_chunk_shape = [normalized_chunks[dim][block_ind[dim]] for dim in range(n)] out_chunk_offset = [block_offsets[dim][block_ind[dim]] for dim in range(n)] out_chunk_edges = np.array([i for i in np.ndindex(tuple([2] * n))])\ * np.array(out_chunk_shape) + np.array(out_chunk_offset) # map output chunk edges onto input image coordinates # to define the input region relevant for the current chunk if matrix.ndim == 1 and len(matrix) == image.ndim: rel_image_edges = matrix * out_chunk_edges + offset else: rel_image_edges = np.dot(matrix, out_chunk_edges.T).T + offset rel_image_i = np.min(rel_image_edges, 0) rel_image_f = np.max(rel_image_edges, 0) # Calculate edge coordinates required for the footprint of the # spline kernel according to # https://github.com/scipy/scipy/blob/9c0d08d7d11fc33311a96d2ac3ad73c8f6e3df00/scipy/ndimage/src/ni_interpolation.c#L412-L419 # noqa: E501 # Also see this discussion: # https://github.com/dask/dask-image/issues/24#issuecomment-706165593 # noqa: E501 for dim in range(n): if order % 2 == 0: rel_image_i[dim] += 0.5 rel_image_f[dim] += 0.5 rel_image_i[dim] = np.floor(rel_image_i[dim]) - order // 2 rel_image_f[dim] = np.floor(rel_image_f[dim]) - order // 2 + order if order == 0: # required for consistency with scipy.ndimage rel_image_i[dim] -= 1 # clip image coordinates to image extent for dim, s in zip(range(n), image_shape): rel_image_i[dim] = np.clip(rel_image_i[dim], 0, s - 1) rel_image_f[dim] = np.clip(rel_image_f[dim], 0, s - 1) rel_image_slice = tuple([slice(int(rel_image_i[dim]), int(rel_image_f[dim]) + 2) for dim in range(n)]) rel_image = image[rel_image_slice] """Block comment for future developers explaining how `offset` is transformed into `offset_prime` for each output chunk. Modify offset to point into cropped image. y = Mx + o Coordinate substitution: y' = y - y0(min_coord_px) x' = x - x0(chunk_offset) Then: y' = Mx' + o + Mx0 - y0 M' = M o' = o + Mx0 - y0 """ offset_prime = offset + np.dot(matrix, out_chunk_offset) - rel_image_i output_layer[(output_name,) + block_ind] = ( affine_transform_method, (da.core.concatenate3, rel_image.__dask_keys__()), asarray_method(matrix), offset_prime, tuple(out_chunk_shape), # output_shape None, # out order, mode, cval, False # prefilter ) rel_images.append(rel_image) graph = HighLevelGraph.from_collections(output_name, output_layer, dependencies=[image] + rel_images) meta = dispatch_asarray(image)([0]).astype(image.dtype) transformed = da.Array(graph, output_name, shape=tuple(output_shape), # chunks=output_chunks, chunks=normalized_chunks, meta=meta) return transformed ================================================ FILE: dask_image/ndinterp/_map_coordinates.py ================================================ # -*- coding: utf-8 -*- from dask import delayed import dask.array as da import numpy as np from dask.base import tokenize from scipy.ndimage import map_coordinates as ndimage_map_coordinates from scipy.ndimage import labeled_comprehension as\ ndimage_labeled_comprehension from ..dispatch._utils import get_type def _map_single_coordinates_array_chunk( input, coordinates, order=3, mode='constant', cval=0.0, prefilter=False): """ Central helper function for implementing map_coordinates. Receives 'input' as a dask array and computed coordinates. Implementation details and steps: 1) associate each coordinate in coordinates with the chunk it maps to in the input 2) for each input chunk that has been associated to at least one coordinate, calculate the minimal slice required to map all coordinates that are associated to it (note that resulting slice coordinates can lie outside of the coordinate's chunk) 3) for each previously obtained slice and its associated coordinates, define a dask task and apply ndimage.map_coordinates 4) outputs of ndimage.map_coordinates are rearranged to match input order """ # STEP 1: Associate each coordinate in coordinates with # the chunk it maps to in the input array # get the input chunks each coordinate maps onto coords_input_chunk_locations = coordinates.T // np.array(input.chunksize) # map out-of-bounds chunk locations to valid input chunks coords_input_chunk_locations = np.clip( coords_input_chunk_locations, 0, np.array(input.numblocks) - 1 ) # all input chunk locations input_chunk_locations = np.array([i for i in np.ndindex(input.numblocks)]) # linearize input chunk locations coords_input_chunk_locations_linear = np.sum( coords_input_chunk_locations * np.array( [np.prod(input.numblocks[:dim]) for dim in range(input.ndim)])[::-1], axis=1, dtype=np.int64) # determine the input chunks that have coords associated and # count how many coords map onto each input chunk chunk_indices_count = np.bincount(coords_input_chunk_locations_linear, minlength=np.prod(input.numblocks)) required_input_chunk_indices = np.where(chunk_indices_count > 0)[0] required_input_chunks = input_chunk_locations[required_input_chunk_indices] coord_rc_count = chunk_indices_count[required_input_chunk_indices] # inverse mapping: input chunks to coordinates required_input_chunk_coords_indices = \ [np.where(coords_input_chunk_locations_linear == irc)[0] for irc in required_input_chunk_indices] # STEP 2: for each input chunk that has been associated to at least # one coordinate, calculate the minimal slice required to map all # coordinates that are associated to it (note that resulting slice # coordinates can lie outside of the coordinate's chunk) # determine the slices of the input array that are required for # mapping all coordinates associated to a given input chunk. # Note that this slice can be larger than the given chunk when coords # lie at chunk borders. # (probably there's a more efficient way to do this) input_slices_lower = np.array([np.clip( ndimage_labeled_comprehension( np.floor(coordinates[dim] - order // 2), coords_input_chunk_locations_linear, required_input_chunk_indices, np.min, np.int64, 0), 0, input.shape[dim] - 1) for dim in range(input.ndim)]) input_slices_upper = np.array([np.clip( ndimage_labeled_comprehension( np.ceil(coordinates[dim] + order // 2) + 1, coords_input_chunk_locations_linear, required_input_chunk_indices, np.max, np.int64, 0), 0, input.shape[dim]) for dim in range(input.ndim)]) input_slices = np.array([input_slices_lower, input_slices_upper])\ .swapaxes(1, 2) # STEP 3: For each previously obtained slice and its associated # coordinates, define a dask task and apply ndimage.map_coordinates # prepare building dask graph # define one task per associated input chunk name = "map_coordinates_chunk-%s" % tokenize( input, coordinates, order, mode, cval, prefilter ) keys = [(name, i) for i in range(len(required_input_chunks))] # pair map_coordinates calls with input slices and mapped coordinates values = [] for irc in range(len(required_input_chunks)): ric_slice = [slice( input_slices[0][irc][dim], input_slices[1][irc][dim]) for dim in range(input.ndim)] ric_offset = input_slices[0][irc] values.append(( ndimage_map_coordinates, input[tuple(ric_slice)], coordinates[:, required_input_chunk_coords_indices[irc]] - ric_offset[:, None], None, order, mode, cval, prefilter )) # build dask graph dsk = dict(zip(keys, values)) ar = da.Array(dsk, name, tuple([list(coord_rc_count)]), input.dtype) # STEP 4: rearrange outputs of ndimage.map_coordinates # to match input order orig_order = np.argsort( [ic for ric_ci in required_input_chunk_coords_indices for ic in ric_ci]) # compute result and reorder # (ordering first would probably unnecessarily inflate the task graph) return ar.compute()[orig_order] def map_coordinates(input, coordinates, order=3, mode='constant', cval=0.0, prefilter=False): """ Wraps ndimage.map_coordinates. Both the input and coordinate arrays can be dask arrays. GPU arrays are not supported. For each chunk in the coordinates array, the coordinates are computed and mapped onto the required slices of the input array. One task is is defined for each input array chunk that has been associated to at least one coordinate. The outputs of the tasks are then rearranged to match the input order. For more details see the docstring of '_map_single_coordinates_array_chunk'. Using this function together with schedulers that support parallelism (threads, processes, distributed) makes sense in the case of either a large input array or a large coordinates array. When both arrays are large, it is recommended to use the single-threaded scheduler. A scheduler can be specified using e.g. `with dask.config.set(scheduler='threads'): ...`. input : array_like The input array. coordinates : array_like The coordinates at which to sample the input. order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. mode : boundary behavior mode, optional cval : float, optional Value to fill past edges of input if mode is 'constant'. Default is 0.0 prefilter : bool, optional If True, prefilter the input before interpolation. Default is False. Warning: prefilter is True by default in `scipy.ndimage.map_coordinates`. Prefiltering here is performed on a chunk-by-chunk basis, which may lead to different results than `scipy.ndimage.map_coordinates` in case of chunked input arrays and order > 1. Note: prefilter is not necessary when: - You are using nearest neighbour interpolation, by setting order=0 - You are using linear interpolation, by setting order=1, or - You have already prefiltered the input array, using the spline_filter or spline_filter1d functions. Comments: - in case of a small coordinate array, it might make sense to rechunk it into a single chunk - note the different default for `prefilter` compared to `scipy.ndimage.map_coordinates`, which is True by default. """ if "cupy" in str(get_type(input)) or "cupy" in str(get_type(coordinates)): raise NotImplementedError( "GPU cupy arrays are not supported by " "dask_image.ndinterp.map_overlap") # if coordinate array is not a dask array, convert it into one if type(coordinates) is not da.Array: coordinates = da.from_array(coordinates, chunks=coordinates.shape) else: # make sure indices are not split across chunks, i.e. that there's # no chunking along the first dimension if len(coordinates.chunks[0]) > 1: coordinates = da.rechunk( coordinates, (-1,) + coordinates.chunks[1:]) # if the input array is not a dask array, convert it into one if type(input) is not da.Array: input = da.from_array(input, chunks=input.shape) # Map each chunk of the coordinates array onto the entire input array. # 'input' is passed to `_map_single_coordinates_array_chunk` using a bit of # a dirty trick: it is split into its components and passed as a delayed # object, which reconstructs the original array when the task is # executed. Therefore two `compute` calls are required to obtain the # final result, one of which is peformed by # `_map_single_coordinates_array_chunk` # Discussion https://dask.discourse.group/t/passing-dask-objects-to-delayed-computations-without-triggering-compute/1441 # noqa: E501 output = da.map_blocks( _map_single_coordinates_array_chunk, delayed(da.Array)(input.dask, input.name, input.chunks, input.dtype), coordinates, order=order, mode=mode, cval=cval, prefilter=prefilter, dtype=input.dtype, chunks=coordinates.chunks[1:], drop_axis=0, ) return output ================================================ FILE: dask_image/ndinterp/_rotate.py ================================================ # -*- coding: utf-8 -*- import dask.array as da import numpy as np from scipy.special import sindg, cosdg from ._affine_transform import affine_transform def rotate( input_arr, angle, axes=(1, 0), reshape=True, output_chunks=None, **kwargs, ): """Rotate an array using Dask. The array is rotated in the plane defined by the two axes given by the `axes` parameter using spline interpolation of the requested order. Chunkwise processing is performed using `dask_image.ndinterp.affine_transform`, for which further parameters supported by the ndimage functions can be passed as keyword arguments. Notes ----- Differences to `ndimage.rotate`: - currently, prefiltering is not supported (affecting the output in case of interpolation `order > 1`) - default order is 1 - modes 'reflect', 'mirror' and 'wrap' are not supported Arguments are equal to `ndimage.rotate` except for - `output` (not present here) - `output_chunks` (relevant in the dask array context) Parameters ---------- input_arr : array_like (Numpy Array, Cupy Array, Dask Array...) The image array. angle : float The rotation angle in degrees. axes : tuple of 2 ints, optional The two axes that define the plane of rotation. Default is the first two axes. reshape : bool, optional If `reshape` is true, the output shape is adapted so that the input array is contained completely in the output. Default is True. output_chunks : tuple of ints, optional The shape of the chunks of the output Dask Array. **kwargs : dict, optional Additional keyword arguments are passed to `dask_image.ndinterp.affine_transform`. Returns ------- rotate : Dask Array A dask array representing the rotated input. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> import dask.array as da >>> fig = plt.figure(figsize=(10, 3)) >>> ax1, ax2, ax3 = fig.subplots(1, 3) >>> img = da.from_array(misc.ascent(),chunks=(64,64)) >>> img_45 = dask_image.ndinterp.rotate(img, 45, reshape=False) >>> full_img_45 = dask_image.ndinterp.rotate(img, 45, reshape=True) >>> ax1.imshow(img, cmap='gray') >>> ax1.set_axis_off() >>> ax2.imshow(img_45, cmap='gray') >>> ax2.set_axis_off() >>> ax3.imshow(full_img_45, cmap='gray') >>> ax3.set_axis_off() >>> fig.set_tight_layout(True) >>> plt.show() >>> print(img.shape) (512, 512) >>> print(img_45.shape) (512, 512) >>> print(full_img_45.shape) (724, 724) """ if not isinstance(input_arr, da.core.Array): input_arr = da.from_array(input_arr) if output_chunks is None: output_chunks = input_arr.chunksize ndim = input_arr.ndim if ndim < 2: raise ValueError('input array should be at least 2D') axes = list(axes) if len(axes) != 2: raise ValueError('axes should contain exactly two values') if not all([float(ax).is_integer() for ax in axes]): raise ValueError('axes should contain only integer values') if axes[0] < 0: axes[0] += ndim if axes[1] < 0: axes[1] += ndim if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim: raise ValueError('invalid rotation plane specified') axes.sort() c, s = cosdg(angle), sindg(angle) rot_matrix = np.array([[c, s], [-s, c]]) img_shape = np.asarray(input_arr.shape) in_plane_shape = img_shape[axes] if reshape: # Compute transformed input bounds iy, ix = in_plane_shape in_bounds = np.array([[0, 0, iy, iy], [0, ix, 0, ix]]) out_bounds = rot_matrix @ in_bounds # Compute the shape of the transformed input plane out_plane_shape = (np.ptp(out_bounds, axis=1) + 0.5).astype(int) else: out_plane_shape = img_shape[axes] output_shape = np.array(img_shape) output_shape[axes] = out_plane_shape output_shape = tuple(output_shape) out_center = rot_matrix @ ((out_plane_shape - 1) / 2) in_center = (in_plane_shape - 1) / 2 offset = in_center - out_center matrix_nd = np.eye(ndim) offset_nd = np.zeros(ndim) for o_x, idx in enumerate(axes): matrix_nd[idx, axes[0]] = rot_matrix[o_x, 0] matrix_nd[idx, axes[1]] = rot_matrix[o_x, 1] offset_nd[idx] = offset[o_x] output = affine_transform( input_arr, matrix=matrix_nd, offset=offset_nd, output_shape=output_shape, output_chunks=output_chunks, **kwargs, ) return output ================================================ FILE: dask_image/ndinterp/_spline_filters.py ================================================ # -*- coding: utf-8 -*- import functools import math import dask.array as da import numpy as np import scipy from ..dispatch._dispatch_ndinterp import ( dispatch_spline_filter, dispatch_spline_filter1d, ) from ..ndfilters._utils import _get_depth_boundary, _update_wrapper # magnitude of the maximum filter pole for each order # (obtained from scipy/ndimage/src/ni_splines.c) _maximum_pole = { 2: 0.171572875253809902396622551580603843, 3: 0.267949192431122706472553658494127633, 4: 0.361341225900220177092212841325675255, 5: 0.430575347099973791851434783493520110, } def _get_default_depth(order, tol=1e-8): """Determine the approximate depth needed for a given tolerance. Here depth is chosen as the smallest integer such that ``|p| ** n < tol`` where `|p|` is the magnitude of the largest pole in the IIR filter. """ return math.ceil(np.log(tol) / np.log(_maximum_pole[order])) @_update_wrapper(scipy.ndimage.spline_filter) def spline_filter( image, order=3, output=np.float64, mode='mirror', output_chunks=None, *, depth=None, **kwargs ): if not isinstance(image, da.core.Array): image = da.from_array(image) # use dispatching mechanism to determine backend spline_filter_method = dispatch_spline_filter(image) try: dtype = np.dtype(output) except TypeError: # pragma: no cover raise TypeError( # pragma: no cover "Could not coerce the provided output to a dtype. " "Passing array to output is not currently supported." ) if depth is None: depth = _get_default_depth(order) if mode == 'wrap': raise NotImplementedError( "mode='wrap' is unsupported. It is recommended to use 'grid-wrap' " "instead." ) # Note: depths of 12 and 24 give results matching SciPy to approximately # single and double precision accuracy, respectively. boundary = "periodic" if mode == 'grid-wrap' else "none" depth, boundary = _get_depth_boundary(image.ndim, depth, boundary) # cannot pass a func kwarg named "output" to map_overlap spline_filter_method = functools.partial(spline_filter_method, output=dtype) result = image.map_overlap( spline_filter_method, depth=depth, boundary=boundary, dtype=dtype, meta=image._meta, # spline_filter kwargs order=order, mode=mode, ) return result @_update_wrapper(scipy.ndimage.spline_filter1d) def spline_filter1d( image, order=3, axis=-1, output=np.float64, mode='mirror', output_chunks=None, *, depth=None, **kwargs ): if not isinstance(image, da.core.Array): image = da.from_array(image) # use dispatching mechanism to determine backend spline_filter1d_method = dispatch_spline_filter1d(image) try: dtype = np.dtype(output) except TypeError: # pragma: no cover raise TypeError( # pragma: no cover "Could not coerce the provided output to a dtype. " "Passing array to output is not currently supported." ) if depth is None: depth = _get_default_depth(order) # use depth 0 on all axes except the filtered axis if not np.isscalar(depth): raise ValueError("depth must be a scalar value") depths = [0] * image.ndim depths[axis] = depth if mode == 'wrap': raise NotImplementedError( "mode='wrap' is unsupported. It is recommended to use 'grid-wrap' " "instead." ) # cannot pass a func kwarg named "output" to map_overlap spline_filter1d_method = functools.partial(spline_filter1d_method, output=dtype) result = image.map_overlap( spline_filter1d_method, depth=tuple(depths), boundary="periodic" if mode == 'grid-wrap' else "none", dtype=dtype, meta=image._meta, # spline_filter1d kwargs order=order, axis=axis, mode=mode, ) return result ================================================ FILE: dask_image/ndmeasure/__init__.py ================================================ # -*- coding: utf-8 -*- import collections import functools import operator import warnings from dask import compute, delayed import dask.config as dask_config import dask.array as da import dask.bag as db import numpy as np from . import _utils from ._utils import _label from ._utils._find_objects import ( _array_chunk_location, _find_bounding_boxes, _find_objects, ) __all__ = [ "area", "center_of_mass", "extrema", "histogram", "label", "labeled_comprehension", "maximum", "maximum_position", "mean", "median", "minimum", "minimum_position", "standard_deviation", "sum", "sum_labels", "variance", ] def area(image, label_image=None, index=None): """Find the area of specified subregions in an image. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), returns area of total image dimensions. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- area : ndarray Area of ``index`` selected regions from ``label_image``. Example ------- >>> import dask.array as da >>> image = da.random.random((3, 3)) >>> label_image = da.from_array( [[1, 1, 0], [1, 0, 3], [0, 7, 0]], chunks=(1, 3)) >>> # No labels given, returns area of total image dimensions >>> area(image) 9 >>> # Combined area of all non-zero labels >>> area(image, label_image).compute() 5 >>> # Areas of selected labels selected with the ``index`` keyword argument >>> area(image, label_image, index=[0, 1, 2, 3]).compute() array([4, 3, 0, 1], dtype=int64) """ if label_image is None: return da.prod(np.array([i for i in image.shape])) else: image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) ones = da.ones( label_image.shape, dtype=bool, chunks=label_image.chunks ) area_lbl = labeled_comprehension( ones, label_image, index, len, int, int(0) ) return area_lbl def center_of_mass(image, label_image=None, index=None): """ Find the center of mass over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- center_of_mass : ndarray Coordinates of centers-of-mass of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) # SciPy transposes these for some reason. # So we do the same thing here. # This only matters if index is some array. index = index.T out_dtype = np.dtype([("com", float, (image.ndim,))]) default_1d = np.full((1,), np.nan, dtype=out_dtype) func = functools.partial( _utils._center_of_mass, shape=image.shape, dtype=out_dtype ) com_lbl = labeled_comprehension( image, label_image, index, func, out_dtype, default_1d[0], pass_positions=True ) com_lbl = com_lbl["com"] return com_lbl def extrema(image, label_image=None, index=None): """ Find the min and max with positions over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- minimums, maximums, min_positions, max_positions : tuple of ndarrays Values and coordinates of minimums and maximums in each feature. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) out_dtype = np.dtype([ ("min_val", image.dtype), ("max_val", image.dtype), ("min_pos", np.dtype(int), image.ndim), ("max_pos", np.dtype(int), image.ndim) ]) default_1d = np.zeros((1,), dtype=out_dtype) func = functools.partial( _utils._extrema, shape=image.shape, dtype=out_dtype ) extrema_lbl = labeled_comprehension( image, label_image, index, func, out_dtype, default_1d[0], pass_positions=True ) extrema_lbl = collections.OrderedDict([ (k, extrema_lbl[k]) for k in ["min_val", "max_val", "min_pos", "max_pos"] ]) for pos_key in ["min_pos", "max_pos"]: pos_nd = extrema_lbl[pos_key] if index.ndim == 0: pos_nd = da.squeeze(pos_nd) elif index.ndim > 1: pos_nd = pos_nd.reshape( (int(np.prod(pos_nd.shape[:-1])), pos_nd.shape[-1]) ) extrema_lbl[pos_key] = pos_nd result = tuple(extrema_lbl.values()) return result def find_objects(label_image): """Return bounding box slices for each object labelled by integers. Parameters ---------- label_image : ndarray Image features noted by integers. Returns ------- Dask dataframe Each row represents an individual integer label. Columns contain the slice information for the object boundaries in each dimension (dimensions are named: 0, 1, ..., nd). Notes ----- You must have the optional dependencies ``dask[dataframe]`` and ``pandas`` installed to use the ``find_objects`` function. They can be installed together via the ``dataframe`` extras group: ``pip install dask-image[dataframe]``. """ try: import pandas # noqa: F401 # used by the private helpers below import dask.dataframe as dd except ImportError as e: raise ImportError( "dask_image.ndmeasure.find_objects requires the optional " "dependencies `dask[dataframe]` and `pandas`. Install them " "with `pip install dask-image[dataframe]`." ) from e if label_image.dtype.char not in np.typecodes['AllInteger']: raise ValueError("find_objects only accepts integer dtype arrays") block_iter = zip( np.ndindex(*label_image.numblocks), map(functools.partial(operator.getitem, label_image), da.core.slices_from_chunks(label_image.chunks)) ) arrays = [] for block_id, block in block_iter: array_location = _array_chunk_location(block_id, label_image.chunks) arrays.append(delayed(_find_bounding_boxes)(block, array_location)) bag = db.from_sequence(arrays) result = bag.fold( functools.partial(_find_objects, label_image.ndim), split_every=2 ).to_delayed() meta = dd.utils.make_meta([(i, object) for i in range(label_image.ndim)]) # avoid the user having to call compute twice on result result = delayed(compute)(result)[0] with dask_config.set({'dataframe.convert-string': False}): result = dd.from_delayed( result, meta=meta, prefix="find-objects-", verify_meta=False ) return result def histogram(image, min, max, bins, label_image=None, index=None): """ Find the histogram over an image at specified subregions. Histogram calculates the frequency of values in an array within bins determined by ``min``, ``max``, and ``bins``. The ``label_image`` and ``index`` keywords can limit the scope of the histogram to specified sub-regions within the array. Parameters ---------- image : ndarray N-D image data min : int Minimum value of range of histogram bins. max : int Maximum value of range of histogram bins. bins : int Number of bins. label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- histogram : ndarray Histogram of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) min = int(min) max = int(max) bins = int(bins) func = functools.partial(_utils._histogram, min=min, max=max, bins=bins) result = labeled_comprehension( image, label_image, index, func, object, None ) return result def label(image, structure=None, wrap_axes=None): """ Label features in an array. Parameters ---------- image : ndarray An array-like object to be labeled. Any non-zero values in ``image`` are counted as features and zero values are considered the background. structure : ndarray, optional A structuring element that defines feature connections. ``structure`` must be symmetric. If no structuring element is provided, one is automatically generated with a squared connectivity equal to one. That is, for a 2-D ``image`` array, the default structuring element is:: [[0,1,0], [1,1,1], [0,1,0]] wrap_axes : tuple of int, optional Whether labels should be wrapped across array boundaries, and if so which axes. This feature is not present in `ndimage.label`. Examples: - (0,) only wrap across the 0th axis. - (0, 1) wrap across the 0th and 1st axis. - (0, 1, 3) wrap across 0th, 1st and 3rd axis. Returns ------- label : ndarray or int An integer ndarray where each unique feature in ``image`` has a unique label in the returned array. num_features : int How many objects were found. """ image = da.asarray(image) labeled_blocks = np.empty(image.numblocks, dtype=object) # First, label each block independently, incrementing the labels in that # block by the total number of labels from previous blocks. This way, each # block's labels are globally unique. block_iter = zip( np.ndindex(*image.numblocks), map(functools.partial(operator.getitem, image), da.core.slices_from_chunks(image.chunks)) ) index, input_block = next(block_iter) labeled_blocks[index], total = _label.block_ndi_label_delayed(input_block, structure) for index, input_block in block_iter: labeled_block, n = _label.block_ndi_label_delayed(input_block, structure) block_label_offset = da.where(labeled_block > 0, total, _label.LABEL_DTYPE.type(0)) labeled_block += block_label_offset labeled_blocks[index] = labeled_block total += n # Put all the blocks together block_labeled = da.block(labeled_blocks.tolist()) # Now, build a label connectivity graph that groups labels across blocks. # We use this graph to find connected components and then relabel each # block according to those. label_groups = _label.label_adjacency_graph( block_labeled, structure, total, wrap_axes=wrap_axes ) new_labeling = _label.connected_components_delayed(label_groups) relabeled = _label.relabel_blocks(block_labeled, new_labeling) n = da.max(relabeled) return (relabeled, n) def labeled_comprehension(image, label_image, index, func, out_dtype, default, pass_positions=False): """ Compute a function over an image at specified subregions. Roughly equivalent to [func(image[labels == i]) for i in index]. Sequentially applies an arbitrary function (that works on array_like image) to subsets of an n-D image array specified by ``label_image`` and ``index``. The option exists to provide the function with positional parameters as the second argument. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. func : callable Python function to apply to ``label_image`` from ``image``. out_dtype : dtype Dtype to use for ``result``. default : int, float or None Default return value when a element of ``index`` does not exist in ``label_image``. pass_positions : bool, optional If True, pass linear indices to ``func`` as a second argument. Default is False. Returns ------- result : ndarray Result of applying ``func`` on ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) out_dtype = np.dtype(out_dtype) default_1d = np.full((1,), default, dtype=out_dtype) pass_positions = bool(pass_positions) args = (image,) if pass_positions: positions = _utils._ravel_shape_indices( image.shape, chunks=image.chunks ) args = (image, positions) result = np.empty(index.shape, dtype=object) for i in np.ndindex(index.shape): lbl_mtch_i = (label_image == index[i]) args_lbl_mtch_i = tuple(e[lbl_mtch_i] for e in args) result[i] = _utils._labeled_comprehension_func( func, out_dtype, default_1d, *args_lbl_mtch_i ) for i in range(result.ndim - 1, -1, -1): result2 = result[..., 0] for j in np.ndindex(index.shape[:i]): result2[j] = da.stack(result[j].tolist(), axis=0) result = result2 result = result[()][..., 0] return result def maximum(image, label_image=None, index=None): """ Find the maxima over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- maxima : ndarray Maxima of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) return labeled_comprehension( image, label_image, index, np.max, image.dtype, image.dtype.type(0) ) def maximum_position(image, label_image=None, index=None): """ Find the positions of maxima over an image at specified subregions. For each region specified by ``label_image``, the position of the maximum value of ``image`` within the region is returned. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- maxima_positions : ndarray Maxima positions of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) if index.shape: index = index.flatten() out_dtype = np.dtype([("pos", int, (image.ndim,))]) default_1d = np.zeros((1,), dtype=out_dtype) func = functools.partial( _utils._argmax, shape=image.shape, dtype=out_dtype ) max_pos_lbl = labeled_comprehension( image, label_image, index, func, out_dtype, default_1d[0], pass_positions=True ) max_pos_lbl = max_pos_lbl["pos"] if index.shape == tuple(): max_pos_lbl = da.squeeze(max_pos_lbl) return max_pos_lbl def mean(image, label_image=None, index=None): """ Find the mean over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- means : ndarray Mean of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) nan = np.float64(np.nan) mean_lbl = labeled_comprehension( image, label_image, index, np.mean, np.float64, nan ) return mean_lbl def median(image, label_image=None, index=None): """ Find the median over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- medians : ndarray Median of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) nan = np.float64(np.nan) return labeled_comprehension( image, label_image, index, np.median, np.float64, nan ) def minimum(image, label_image=None, index=None): """ Find the minima over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- minima : ndarray Minima of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) return labeled_comprehension( image, label_image, index, np.min, image.dtype, image.dtype.type(0) ) def minimum_position(image, label_image=None, index=None): """ Find the positions of minima over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- minima_positions : ndarray Maxima positions of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) if index.shape: index = index.flatten() out_dtype = np.dtype([("pos", int, (image.ndim,))]) default_1d = np.zeros((1,), dtype=out_dtype) func = functools.partial( _utils._argmin, shape=image.shape, dtype=out_dtype ) min_pos_lbl = labeled_comprehension( image, label_image, index, func, out_dtype, default_1d[0], pass_positions=True ) min_pos_lbl = min_pos_lbl["pos"] if index.shape == tuple(): min_pos_lbl = da.squeeze(min_pos_lbl) return min_pos_lbl def standard_deviation(image, label_image=None, index=None): """ Find the standard deviation over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- standard_deviation : ndarray Standard deviation of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) nan = np.float64(np.nan) std_lbl = labeled_comprehension( image, label_image, index, np.std, np.float64, nan ) return std_lbl def sum_labels(image, label_image=None, index=None): """ Find the sum of all pixels over specified subregions of an image. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- sum_lbl : ndarray Sum of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) sum_lbl = labeled_comprehension( image, label_image, index, np.sum, np.float64, np.float64(0) ) return sum_lbl def sum(image, label_image=None, index=None): """DEPRECATED FUNCTION. Use `sum_labels` instead.""" warnings.warn("DEPRECATED FUNCTION. Use `sum_labels` instead.", DeprecationWarning) return sum_labels(image, label_image=label_image, index=index) def variance(image, label_image=None, index=None): """ Find the variance over an image at specified subregions. Parameters ---------- image : ndarray N-D image data label_image : ndarray, optional Image features noted by integers. If None (default), all values. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero ``label_image`` are used. The ``index`` argument only works when ``label_image`` is specified. Returns ------- variance : ndarray Variance of ``image`` over the ``index`` selected regions from ``label_image``. """ image, label_image, index = _utils._norm_input_labels_index( image, label_image, index ) nan = np.float64(np.nan) var_lbl = labeled_comprehension( image, label_image, index, np.var, np.float64, nan ) return var_lbl ================================================ FILE: dask_image/ndmeasure/_utils/__init__.py ================================================ # -*- coding: utf-8 -*- import warnings import dask import dask.array as da import numpy as np def _norm_input_labels_index(image, label_image=None, index=None): """ Normalize arguments to a standard form. """ image = da.asarray(image) if label_image is None: label_image = da.ones( image.shape, dtype=int, chunks=image.chunks, ) index = da.from_array(np.array(1, dtype=int)) elif index is None: label_image = (label_image > 0).astype(int) index = da.from_array(np.array(1, dtype=int)) label_image = da.asarray(label_image) index = da.asarray(index) if index.ndim > 1: warnings.warn( "Having index with dimensionality greater than 1 is undefined.", FutureWarning ) if image.shape != label_image.shape: raise ValueError( "The image and label_image arrays must be the same shape." ) return (image, label_image, index) def _ravel_shape_indices_kernel(*args): args2 = tuple( a[i * (None,) + (slice(None),) + (len(args) - i - 1) * (None,)] for i, a in enumerate(args) ) return sum(args2) def _ravel_shape_indices(dimensions, dtype=int, chunks=None): """ Gets the raveled indices shaped like input. """ indices = [ da.arange( 0, np.prod(dimensions[i:], dtype=dtype), np.prod(dimensions[i + 1:], dtype=dtype), dtype=dtype, chunks=c ) for i, c in enumerate(chunks) ] indices = da.blockwise( _ravel_shape_indices_kernel, tuple(range(len(indices))), *sum([(a, (i,)) for i, a in enumerate(indices)], tuple()), dtype=dtype ) return indices def _argmax(a, positions, shape, dtype): """ Find original array position corresponding to the maximum. """ result = np.empty((1,), dtype=dtype) pos_nd = np.unravel_index(positions[np.argmax(a)], shape) for i, pos_nd_i in enumerate(pos_nd): result["pos"][0, i] = pos_nd_i return result[0] def _argmin(a, positions, shape, dtype): """ Find original array position corresponding to the minimum. """ result = np.empty((1,), dtype=dtype) pos_nd = np.unravel_index(positions[np.argmin(a)], shape) for i, pos_nd_i in enumerate(pos_nd): result["pos"][0, i] = pos_nd_i return result[0] def _center_of_mass(a, positions, shape, dtype): """ Find the center of mass for each ROI. """ result = np.empty((1,), dtype=dtype) positions_nd = np.unravel_index(positions, shape) a_sum = np.sum(a) a_wt_i = np.empty(a.shape) for i, pos_nd_i in enumerate(positions_nd): a_wt_sum_i = np.multiply(a, pos_nd_i, out=a_wt_i).sum() result["com"][0, i] = a_wt_sum_i / a_sum return result[0] def _extrema(a, positions, shape, dtype): """ Find minimum and maximum as well as positions for both. """ result = np.empty((1,), dtype=dtype) int_min_pos = np.argmin(a) int_max_pos = np.argmax(a) result["min_val"] = a[int_min_pos] result["max_val"] = a[int_max_pos] min_pos_nd = np.unravel_index(positions[int_min_pos], shape) max_pos_nd = np.unravel_index(positions[int_max_pos], shape) for i in range(len(shape)): result["min_pos"][0, i] = min_pos_nd[i] result["max_pos"][0, i] = max_pos_nd[i] return result[0] def _histogram(image, min, max, bins): """ Delayed wrapping of NumPy's histogram Also reformats the arguments. """ return np.histogram(image, bins, (min, max))[0] @dask.delayed def _labeled_comprehension_delayed(func, out_dtype, default, a, positions=None): """ Wrapped delayed labeled comprehension function Included in the module for pickling purposes. Also handle cases where computation should not occur. """ result = np.empty((1,), dtype=out_dtype) if a.size: if positions is None: result[0] = func(a) else: result[0] = func(a, positions) else: result[0] = default[0] return result def _labeled_comprehension_func(func, out_dtype, default, a, positions=None): """ Wrapped labeled comprehension function Ensures the result is a proper Dask Array and the computation delayed. """ return da.from_delayed( _labeled_comprehension_delayed(func, out_dtype, default, a, positions), (1,), out_dtype ) ================================================ FILE: dask_image/ndmeasure/_utils/_find_objects.py ================================================ import numpy as np from dask.delayed import Delayed import dask.config as dask_config def _array_chunk_location(block_id, chunks): """Pixel coordinate of top left corner of the array chunk.""" array_location = [] for idx, chunk in zip(block_id, chunks): array_location.append(sum(chunk[:idx])) return tuple(array_location) def _find_bounding_boxes(x, array_location): """An alternative to scipy.ndimage.find_objects. We use this alternative because scipy.ndimage.find_objects returns a tuple of length N, where N is the largest integer label. This is not ideal for distributed labels, where there might be only one or two objects in an image chunk labelled with very large integers. This alternative function returns a pandas dataframe, with one row per object found in the image chunk. """ import pandas as pd unique_vals = np.unique(x) unique_vals = unique_vals[unique_vals != 0] result = {} for val in unique_vals: positions = np.where(x == val) slices = tuple( slice( np.min(pos) + array_location[i], np.max(pos) + 1 + array_location[i] ) for i, pos in enumerate(positions) ) result[val] = slices column_names = [i for i in range(x.ndim)] # column names are: 0, 1, ... nD return pd.DataFrame.from_dict(result, orient='index', columns=column_names) def _combine_slices(slices): "Return the union of all slices." if len(slices) == 1: return slices[0] else: start = min([sl.start for sl in slices]) stop = max([sl.stop for sl in slices]) return slice(start, stop) def _merge_bounding_boxes(x, ndim): "Merge the bounding boxes describing objects over multiple image chunks." import pandas as pd x = x.dropna() data = {} # For each dimension in the array, # pick out the slice values belonging to that dimension # and combine the slices # (i.e. find the union; the slice expanded to all input slices). for i in range(ndim): # Array dimensions are labelled by a number followed by an underscroe # i.e. column labels are: 0_x, 1_x, 2_x, ... 0_y, 1_y, 2_y, ... # (x and y represent the pair of chunks label slices are merged from) slices = [x[ii] for ii in x.index if str(ii).startswith(str(i))] combined_slices = _combine_slices(slices) data[i] = combined_slices result = pd.Series(data=data, index=[i for i in range(ndim)], name=x.name) return result def _find_objects(ndim, df1, df2): """Main utility function for find_objects.""" import pandas as pd import dask.dataframe as dd meta = dd.utils.make_meta([(i, object) for i in range(ndim)]) if isinstance(df1, Delayed): with dask_config.set({'dataframe.convert-string': False}): df1 = dd.from_delayed(df1, meta=meta) if isinstance(df2, Delayed): with dask_config.set({'dataframe.convert-string': False}): df2 = dd.from_delayed(df2, meta=meta) if len(df1) > 0 and len(df2) > 0: ddf = dd.merge( df1, df2, how="outer", left_index=True, right_index=True) elif len(df1) > 0: ddf = df1 elif len(df2) > 0: ddf = df2 else: ddf = pd.DataFrame() result = ddf.apply(_merge_bounding_boxes, ndim=ndim, axis=1, meta=meta) return result ================================================ FILE: dask_image/ndmeasure/_utils/_label.py ================================================ # -*- coding: utf-8 -*- import operator import dask import dask.array as da import numpy as np import scipy.ndimage import scipy.sparse import scipy.sparse.csgraph def _get_ndimage_label_dtype(): return scipy.ndimage.label([1, 0, 1])[0].dtype LABEL_DTYPE = _get_ndimage_label_dtype() def _get_connected_components_dtype(): a = np.empty((0, 0), dtype=int) return scipy.sparse.csgraph.connected_components(a)[1].dtype CONN_COMP_DTYPE = _get_connected_components_dtype() def relabel_blocks(block_labeled, new_labeling): """ Relabel a block-labeled array based on ``new_labeling``. Parameters ---------- block_labeled : array of int The input label array. new_labeling : 1D array of int A new labeling, such that ``labeling[i] = j`` implies that any element in ``array`` valued ``i`` should be relabeled to ``j``. Returns ------- relabeled : array of int, same shape as ``array`` The relabeled input array. """ new_labeling = new_labeling.astype(LABEL_DTYPE) relabeled = da.map_blocks(operator.getitem, new_labeling, block_labeled, dtype=LABEL_DTYPE, chunks=block_labeled.chunks) return relabeled def _unique_axis(a, axis=0): """Find unique subarrays in axis in N-D array.""" at = np.ascontiguousarray(a.swapaxes(0, axis)) dt = np.dtype([("values", at.dtype, at.shape[1:])]) atv = at.view(dt) r = np.unique(atv)["values"].swapaxes(0, axis) return r def _across_block_label_grouping(face, structure): """ Find a grouping of labels across block faces. We assume that the labels on either side of the block face are unique to that block. This is enforced elsewhere. Parameters ---------- face : array-like This is the boundary, of thickness (2,), between two blocks. structure : array-like Structuring element for the labeling of the face. This should have length 3 along each axis and have the same number of dimensions as ``face``. Returns ------- grouped : array of int, shape (2, M) If a column of ``grouped`` contains the values ``i`` and ``j``, it implies that labels ``i`` and ``j`` belong in the same group. These are edges in a global label connectivity graph. Examples -------- >>> face = np.array([[1, 1, 0, 2, 2, 0, 8], ... [0, 7, 7, 7, 7, 0, 9]]) >>> structure = np.ones((3, 3), dtype=bool) >>> _across_block_label_grouping(face, structure) array([[1, 2, 8], [2, 7, 9]], dtype=np.int32) This shows that 1-2 are connected, 2-7 are connected, and 8-9 are connected. The resulting graph is (1-2-7), (8-9). """ common_labels = scipy.ndimage.label(face, structure)[0] matching = np.stack((common_labels.ravel(), face.ravel()), axis=1) unique_matching = _unique_axis(matching) valid = np.all(unique_matching, axis=1) unique_valid_matching = unique_matching[valid] common_labels, labels = unique_valid_matching.T in_group = np.flatnonzero(np.diff(common_labels) == 0) i = np.take(labels, in_group) j = np.take(labels, in_group + 1) grouped = np.stack((i, j), axis=0) return grouped def _across_block_label_grouping_delayed(face, structure): """Delayed version of :func:`_across_block_label_grouping`.""" _across_block_label_grouping_ = dask.delayed(_across_block_label_grouping) grouped = _across_block_label_grouping_(face, structure) return da.from_delayed(grouped, shape=(2, np.nan), dtype=LABEL_DTYPE) @dask.delayed def _to_csr_matrix(i, j, n): """Using i and j as coo-format coordinates, return csr matrix.""" v = np.ones_like(i) mat = scipy.sparse.coo_matrix((v, (i, j)), shape=(n, n)) return mat.tocsr() def label_adjacency_graph(labels, structure, nlabels, wrap_axes=None): """ Adjacency graph of labels between chunks of ``labels``. Each chunk in ``labels`` has been labeled independently, and the labels in different chunks are guaranteed to be unique. Here we construct a graph connecting labels in different chunks that correspond to the same logical label in the global volume. This is true if the two labels "touch" across the block face as defined by the input ``structure``. Parameters ---------- labels : dask array of int The input labeled array, where each chunk is independently labeled. structure : array of bool Structuring element, shape (3,) * labels.ndim. nlabels : delayed int The total number of labels in ``labels`` *before* correcting for global consistency. wrap_axes : tuple of int, optional Should labels be wrapped across array boundaries, and if so which axes. - (0,) only wrap over the 0th axis. - (0, 1) wrap over the 0th and 1st axis. - (0, 1, 3) wrap over 0th, 1st and 3rd axis. Returns ------- mat : delayed scipy.sparse.csr_matrix This matrix has value 1 at (i, j) if label i is connected to label j in the global volume, 0 everywhere else. """ if structure is None: structure = scipy.ndimage.generate_binary_structure(labels.ndim, 1) face_slices = _chunk_faces( labels.chunks, labels.shape, structure, wrap_axes=wrap_axes ) all_mappings = [da.empty((2, 0), dtype=LABEL_DTYPE, chunks=1)] for face_slice in face_slices: face = labels[face_slice] mapped = _across_block_label_grouping_delayed(face, structure) all_mappings.append(mapped) all_mappings = da.concatenate(all_mappings, axis=1) i, j = all_mappings mat = _to_csr_matrix(i, j, nlabels + 1) return mat def _chunk_faces(chunks, shape, structure, wrap_axes=None): """ Return slices for two-pixel-wide boundaries between chunks. Parameters ---------- chunks : tuple of tuple of int The chunk specification of the array. shape : tuple of int The shape of the array. structure: array of bool Structuring element, shape (3,) * ndim. wrap_axes : tuple of int, optional Should labels be wrapped across array boundaries, and if so which axes. - (0,) only wrap over the 0th axis. - (0, 1) wrap over the 0th and 1st axis. - (0, 1, 3) wrap over 0th, 1st and 3rd axis. Yields ------- tuple of slices Each element indexes a face between two chunks. Examples -------- >>> import dask.array as da >>> import scipy.ndimage as ndi >>> a = da.arange(110, chunks=110).reshape((10, 11)).rechunk(5) >>> structure = ndi.generate_binary_structure(2, 1) >>> list(chunk_faces(a.chunks, a.shape, structure)) [(slice(4, 6, None), slice(0, 5, None)), (slice(4, 6, None), slice(5, 10, None)), (slice(4, 6, None), slice(10, 11, None)), (slice(0, 5, None), slice(4, 6, None)), (slice(0, 5, None), slice(9, 11, None)), (slice(5, 10, None), slice(4, 6, None)), (slice(5, 10, None), slice(9, 11, None))] """ ndim = len(shape) slices = da.core.slices_from_chunks(chunks) # arrange block/chunk indices on grid block_summary = np.arange(len(slices)).reshape( [len(c) for c in chunks]) # Iterate over all blocks and use the structuring element # to determine which blocks should be connected. # For wrappped axes, we need to consider the block # before the current block with index -1 as well. numblocks = [len(c) if wrap_axes is None or ax not in wrap_axes else len(c) + 1 for ax, c in enumerate(chunks)] for curr_block in np.ndindex(tuple(numblocks)): curr_block = list(curr_block) if wrap_axes is not None: # start at -1 indices for wrapped axes for wrap_axis in wrap_axes: curr_block[wrap_axis] = curr_block[wrap_axis] - 1 # iterate over neighbors of the current block for pos_structure_coord in np.array(np.where(structure)).T: # only consider forward neighbors if min(pos_structure_coord) < 1 or max(pos_structure_coord) < 2: continue neigh_block = [ curr_block[dim] + pos_structure_coord[dim] - 1 for dim in range(ndim) ] if max([neigh_block[dim] >= block_summary.shape[dim] for dim in range(ndim)]): continue # get current slice index ind_curr_block = block_summary[tuple(curr_block)] curr_slice = [] for dim in range(ndim): # keep slice if not on boundary if neigh_block[dim] == curr_block[dim]: curr_slice.append(slices[ind_curr_block][dim]) # otherwise, add two-pixel-wide boundary else: if slices[ind_curr_block][dim].stop == shape[dim]: curr_slice.append(slice(None, None, shape[dim] - 1)) else: curr_slice.append(slice( slices[ind_curr_block][dim].stop - 1, slices[ind_curr_block][dim].stop + 1)) yield tuple(curr_slice) def block_ndi_label_delayed(block, structure): """ Delayed version of ``scipy.ndimage.label``. Parameters ---------- block : dask array (single chunk) The input array to be labeled. structure : array of bool Structure defining the connectivity of the labeling. Returns ------- labeled : dask array, same shape as ``block``. The labeled array. n : delayed int The number of labels in ``labeled``. """ label = dask.delayed(scipy.ndimage.label, nout=2) labeled_block, n = label(block, structure=structure) n = dask.delayed(LABEL_DTYPE.type)(n) labeled = da.from_delayed(labeled_block, shape=block.shape, dtype=LABEL_DTYPE) n = da.from_delayed(n, shape=(), dtype=LABEL_DTYPE) return labeled, n def connected_components_delayed(csr_matrix): """ Delayed version of scipy.sparse.csgraph.connected_components. This version only returns the (delayed) connected component labelling, not the number of components. """ conn_comp = dask.delayed(scipy.sparse.csgraph.connected_components, nout=2) return da.from_delayed(conn_comp(csr_matrix, directed=False)[1], shape=(np.nan,), dtype=CONN_COMP_DTYPE) ================================================ FILE: dask_image/ndmorph/__init__.py ================================================ # -*- coding: utf-8 -*- import scipy.ndimage from ..dispatch._dispatch_ndmorph import (dispatch_binary_dilation, dispatch_binary_erosion) from . import _ops, _utils __all__ = [ "binary_closing", "binary_dilation", "binary_erosion", "binary_opening", ] @_utils._update_wrapper(scipy.ndimage.binary_closing) def binary_closing(image, structure=None, iterations=1, origin=0, mask=None, border_value=0, brute_force=False): image = (image != 0) structure = _utils._get_structure(image, structure) iterations = _utils._get_iterations(iterations) origin = _utils._get_origin(structure.shape, origin) kwargs = dict( structure=structure, iterations=iterations, origin=origin, mask=mask, border_value=border_value, brute_force=brute_force ) result = image result = binary_dilation(result, **kwargs) result = binary_erosion(result, **kwargs) return result @_utils._update_wrapper(scipy.ndimage.binary_dilation) def binary_dilation(image, structure=None, iterations=1, mask=None, border_value=0, origin=0, brute_force=False): border_value = _utils._get_border_value(border_value) result = _ops._binary_op( dispatch_binary_dilation(image), image, structure=structure, iterations=iterations, mask=mask, origin=origin, brute_force=brute_force, border_value=border_value ) return result @_utils._update_wrapper(scipy.ndimage.binary_erosion) def binary_erosion(image, structure=None, iterations=1, mask=None, border_value=0, origin=0, brute_force=False): border_value = _utils._get_border_value(border_value) result = _ops._binary_op( dispatch_binary_erosion(image), image, structure=structure, iterations=iterations, mask=mask, origin=origin, brute_force=brute_force, border_value=border_value ) return result @_utils._update_wrapper(scipy.ndimage.binary_opening) def binary_opening(image, structure=None, iterations=1, origin=0, mask=None, border_value=0, brute_force=False): image = (image != 0) structure = _utils._get_structure(image, structure) iterations = _utils._get_iterations(iterations) origin = _utils._get_origin(structure.shape, origin) kwargs = dict( structure=structure, iterations=iterations, origin=origin, mask=mask, border_value=border_value, brute_force=brute_force ) result = image result = binary_erosion(result, **kwargs) result = binary_dilation(result, **kwargs) return result ================================================ FILE: dask_image/ndmorph/_ops.py ================================================ # -*- coding: utf-8 -*- import dask.array as da from . import _utils def _binary_op(func, image, structure=None, iterations=1, mask=None, origin=0, brute_force=False, **kwargs): image = (image != 0) structure = _utils._get_structure(image, structure) iterations = _utils._get_iterations(iterations) mask = _utils._get_mask(image, mask) origin = _utils._get_origin(structure.shape, origin) brute_force = _utils._get_brute_force(brute_force) depth = _utils._get_depth(structure.shape, origin) depth, boundary = _utils._get_depth_boundary(structure.ndim, depth, "none") result = image for i in range(iterations): iter_result = result.map_overlap( func, depth=depth, boundary=boundary, dtype=bool, meta=image._meta, structure=structure, origin=origin, **kwargs ) result = da.where(mask, iter_result, result) result._meta = image._meta.astype(bool) return result ================================================ FILE: dask_image/ndmorph/_utils.py ================================================ # -*- coding: utf-8 -*- import numbers import dask.array as da import numpy as np from ..dispatch._dispatch_ndmorph import dispatch_binary_structure from ..ndfilters._utils import (_get_depth, _get_depth_boundary, _get_origin, _update_wrapper) _update_wrapper = _update_wrapper _get_depth_boundary = _get_depth_boundary _get_origin = _get_origin _get_depth = _get_depth def _get_structure(image, structure): # Create square connectivity as default if structure is None: generate_binary_structure = dispatch_binary_structure(image) structure = generate_binary_structure(image.ndim, 1) elif hasattr(structure, 'ndim'): if structure.ndim != image.ndim: raise RuntimeError( "`structure` must have the same rank as `image`." ) if not issubclass(structure.dtype.type, np.bool_): structure = (structure != 0) else: raise TypeError("`structure` must be an array.") return structure def _get_iterations(iterations): if not isinstance(iterations, numbers.Integral): raise TypeError("`iterations` must be of integral type.") if iterations < 1: raise NotImplementedError( "`iterations` must be equal to 1 or greater not less." ) return iterations def _get_dtype(a): # Get the dtype of a value or an array. # Even handle non-NumPy types. return getattr(a, "dtype", np.dtype(type(a))) def _get_mask(image, mask): if mask is None: mask = True mask_type = _get_dtype(mask).type if isinstance(mask, (np.ndarray, da.Array)): if mask.shape != image.shape: raise RuntimeError("`mask` must have the same shape as `image`.") if not issubclass(mask_type, np.bool_): mask = (mask != 0) elif issubclass(mask_type, np.bool_): mask = bool(mask) else: raise TypeError("`mask` must be a Boolean or an array.") return mask def _get_border_value(border_value): if not isinstance(border_value, numbers.Integral): raise TypeError("`border_value` must be of integral type.") border_value = (border_value != 0) return border_value def _get_brute_force(brute_force): if brute_force is not False: if brute_force is True: raise NotImplementedError( "`brute_force` other than `False` is not yet supported." ) else: raise TypeError( "`brute_force` must be `bool`." ) return brute_force ================================================ FILE: docs/Makefile ================================================ # Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/dask_image.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/dask_image.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/dask_image" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/dask_image" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." ================================================ FILE: docs/api.rst ================================================ API === .. toctree:: :glob: dask_image ================================================ FILE: docs/authors.rst ================================================ .. include:: ../AUTHORS.rst ================================================ FILE: docs/conf.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- # # dask-image documentation build configuration file, created by # sphinx-quickstart on Tue Jul 9 22:26:36 2013. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import dask_image._version # Get the project root dir, which is the parent dir of this cwd = os.getcwd() project_root = os.path.dirname(cwd) # Insert the project root dir as the first element in the PYTHONPATH. # This lets us ensure that the source package is imported, and that its # version is used. sys.path.insert(0, project_root) # -- General configuration --------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo', 'sphinx.ext.napoleon' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'dask-image' copyright = u"2018, John Kirkham" # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The full version, including alpha/beta/rc tags. release = dask_image._version.__version__ # The short X.Y.Z version. version = '.'.join(release.split('.')[:3]) if "dev" in release: display_version = "(development version)" else: display_version = version # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # -- Options for HTML output ------------------------------------------- # Set canonical URL from the Read the Docs Domain html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "") # Tell Jinja2 templates the build is running on Read the Docs if os.environ.get("READTHEDOCS", "") == "True": if "html_context" not in globals(): html_context = {} html_context["READTHEDOCS"] = True # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'dask_sphinx_theme' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = f"{project} {display_version} documentation" # A shorter title for the navigation bar. Default is the same as # html_title. html_short_title = f"{project} docs" # Output file base name for HTML help builder. htmlhelp_basename = 'dask_imagedoc' # -- Options for LaTeX output ------------------------------------------ latex_elements = {} # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'dask_image.tex', u'dask-image Documentation', u'John Kirkham', 'manual'), ] # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'dask_image', u'dask-image Documentation', [u'John Kirkham'], 1) ] # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'dask_image', u'dask-image Documentation', u'John Kirkham', 'dask_image', 'One line description of project.', 'Miscellaneous'), ] # Run sphinx-apidoc before building docs. def run_apidoc(_): ignore_paths = [ "../setup.py", "../tests", "../travis_pypi_setup.py", ] argv = [ "-f", "-T", "-e", "-M", "-o", ".", ".." ] + ignore_paths try: # Sphinx 1.7+ from sphinx.ext import apidoc except ImportError: # Sphinx 1.6 (and earlier) from sphinx import apidoc argv.insert(0, apidoc.__file__) apidoc.main(argv) def setup(app): app.connect('builder-inited', run_apidoc) ================================================ FILE: docs/contributing.rst ================================================ .. include:: ../CONTRIBUTING.rst ================================================ FILE: docs/coverage.rst ================================================ ***************** Function Coverage ***************** Coverage of dask-image vs scipy ndimage functions ************************************************* This table shows which SciPy ndimage functions are supported by dask-image. .. list-table:: :widths: 25 25 25 30 :header-rows: 0 * - Function name - SciPy ndimage - dask-image - dask-image GPU support * - ``affine_transform`` - ✓ - ✓ - ✓ * - ``binary_closing`` - ✓ - ✓ - ✓ * - ``binary_dilation`` - ✓ - ✓ - ✓ * - ``binary_erosion`` - ✓ - ✓ - ✓ * - ``binary_fill_holes`` - ✓ - - * - ``binary_hit_or_miss`` - ✓ - - * - ``binary_opening`` - ✓ - ✓ - ✓ * - ``binary_propagation`` - ✓ - - * - ``black_tophat`` - ✓ - - * - ``center_of_mass`` - ✓ - ✓ - * - ``convolve`` - ✓ - ✓ - ✓ * - ``convolve1d`` - ✓ - - * - ``correlate`` - ✓ - ✓ - ✓ * - ``correlate1d`` - ✓ - - * - ``distance_transform_bf`` - ✓ - - * - ``distance_transform_cdt`` - ✓ - - * - ``distance_transform_edt`` - ✓ - - * - ``extrema`` - ✓ - ✓ - * - ``find_objects`` - ✓ - ✓ - * - ``fourier_ellipsoid`` - ✓ - - * - ``fourier_gaussian`` - ✓ - ✓ - * - ``fourier_shift`` - ✓ - ✓ - * - ``fourier_uniform`` - ✓ - ✓ - * - ``gaussian_filter`` - ✓ - ✓ - ✓ * - ``gaussian_filter1d`` - ✓ - - * - ``gaussian_gradient_magnitude`` - ✓ - ✓ - ✓ * - ``gaussian_laplace`` - ✓ - ✓ - ✓ * - ``generate_binary_structure`` - ✓ - - * - ``generic_filter`` - ✓ - ✓ - ✓ * - ``generic_filter1d`` - ✓ - - * - ``generic_gradient_magnitude`` - ✓ - - * - ``generic_laplace`` - ✓ - - * - ``geometric_transform`` - ✓ - - * - ``grey_closing`` - ✓ - - * - ``grey_dilation`` - ✓ - - * - ``grey_erosion`` - ✓ - - * - ``grey_opening`` - ✓ - - * - ``histogram`` - ✓ - ✓ - * - ``imread`` - ✓ - ✓ - ✓ * - ``iterate_structure`` - ✓ - - * - ``label`` - ✓ - ✓ - * - ``labeled_comprehension`` - ✓ - ✓ - * - ``laplace`` - ✓ - ✓ - ✓ * - ``map_coordinates`` - ✓ - ✓ - * - ``maximum`` - ✓ - ✓ - * - ``maximum_filter`` - ✓ - ✓ - ✓ * - ``maximum_filter1d`` - ✓ - - * - ``maximum_position`` - ✓ - ✓ - * - ``mean`` - ✓ - ✓ - * - ``median`` - ✓ - ✓ - * - ``median_filter`` - ✓ - ✓ - ✓ * - ``minimum`` - ✓ - ✓ - * - ``minimum_filter`` - ✓ - ✓ - ✓ * - ``minimum_filter1d`` - ✓ - - * - ``minimum_position`` - ✓ - ✓ - * - ``morphological_gradient`` - ✓ - - * - ``morphological_laplace`` - ✓ - - * - ``percentile_filter`` - ✓ - ✓ - ✓ * - ``prewitt`` - ✓ - ✓ - ✓ * - ``rank_filter`` - ✓ - ✓ - ✓ * - ``rotate`` - ✓ - ✓ - * - ``shift`` - ✓ - - * - ``sobel`` - ✓ - ✓ - ✓ * - ``spline_filter`` - ✓ - ✓ - ✓ * - ``spline_filter1d`` - ✓ - ✓ - ✓ * - ``standard_deviation`` - ✓ - ✓ - * - ``sum_labels`` - ✓ - ✓ - * - ``threshold_local`` - scikit-image function - ✓ - ✓ * - ``uniform_filter`` - ✓ - ✓ - ✓ * - ``uniform_filter1d`` - ✓ - - * - ``variance`` - ✓ - ✓ - * - ``watershed_ift`` - ✓ - - * - ``white_tophat`` - ✓ - - * - ``zoom`` - ✓ - - ================================================ FILE: docs/history.rst ================================================ .. include:: ../HISTORY.rst ================================================ FILE: docs/index.rst ================================================ Image processing with Dask Arrays ================================= Features -------- * Support focuses on Dask Arrays. * Provides support for loading image files. * Implements commonly used N-D filters. * Includes a few N-D Fourier filters. * Provides some functions for working with N-D label images. * Supports a few N-D morphological operators. Contents -------- .. toctree:: :maxdepth: 1 installation quickstart coverage api contributing authors history Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` ================================================ FILE: docs/installation.rst ================================================ .. highlight:: shell ============ Installation ============ Stable release -------------- To install dask-image, run this command in your terminal: .. code-block:: console $ conda install -c conda-forge dask-image This is the preferred method to install dask-image, as it will always install the most recent stable release. If you don't have `conda`_ installed, we recommend downloading and installing it with the conda-forge distribution `Miniforge`_. Alternatively, you can install dask-image with pip: .. code-block:: console $ python -m pip install dask-image If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process. .. _conda: https://conda.io/en/latest/ .. _Miniforge: https://conda-forge.org/download/ .. _pip: https://pip.pypa.io .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ From sources ------------ The sources for dask-image can be downloaded from the `Github repo`_. You can either clone the public repository: .. code-block:: console $ git clone git://github.com/dask/dask-image Or download the `tarball`_: .. code-block:: console $ curl -OL https://github.com/dask/dask-image/tarball/main Once you have a copy of the source, you can install it with: .. code-block:: console $ cd dask-image $ python -m pip install . .. _Github repo: https://github.com/dask/dask-image .. _tarball: https://github.com/dask/dask-image/tarball/main ================================================ FILE: docs/make.bat ================================================ @ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\dask_image.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\dask_image.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %BUILDDIR%/.. echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %BUILDDIR%/.. echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end ================================================ FILE: docs/quickstart.rst ================================================ .. highlight:: shell ========== Quickstart ========== Importing dask-image -------------------- Import dask image is with an underscore, like this example: .. code-block:: python import dask_image.imread import dask_image.ndfilters Dask Examples ------------- We highly recommend checking out the dask-image-quickstart.ipynb notebook (and any other dask-image example notebooks) at the dask-examples repository. You can find the dask-image quickstart notebook in the ``applications`` folder of this repository: https://github.com/dask/dask-examples The direct link to the notebook file is here: https://github.com/dask/dask-examples/blob/main/applications/image-processing.ipynb All the example notebooks are available to launch with mybinder and test out interactively. An Even Quicker Start --------------------- You can read files stored on disk into a dask array by passing the filename, or regex matching multiple filenames into ``imread()``. .. code-block:: python filename_pattern = 'path/to/image-*.png' images = dask_image.imread.imread(filename_pattern) If your images are parts of a much larger image, dask can stack, concatenate or block chunks together: http://docs.dask.org/en/latest/array-stack.html Calling dask-image functions is also easy. .. code-block:: python import dask_image.ndfilters blurred_image = dask_image.ndfilters.gaussian_filter(images, sigma=10) Many other functions can be applied to dask arrays. See the dask_array_documentation_ for more detail on general array operations. .. _dask_array_documentation: http://docs.dask.org/en/latest/array.html .. code-block:: python result = function_name(images) Further Reading --------------- Good places to start include: * The dask-image API documentation: http://image.dask.org/en/latest/api.html * The documentation on working with dask arrays: http://docs.dask.org/en/latest/array.html Talks and Slides ---------------- Here are some talks and slides that you can watch to learn dask-image: - 2020, Genevieve Buckley's talk at PyConAU and SciPy Japan - `Watch the talk `_ - `Scipy Japanのトークを見る(プレゼンテーション:英語, 字幕:日本語) `_ Watch the talk at SciPy Japan (presentation in English, captions in Japanese) - `See the slides `_ - 2019, John Kirkham's SciPy talk - `Watch the talk here `_ - `See the slides here `_ ================================================ FILE: docs/release/generate_release_notes.py ================================================ """Generate the release notes automatically from Github pull requests. Start with: ``` export GH_TOKEN= ``` Then, for to include everything from a certain release to main: ``` python /path/to/generate_release_notes.py v0.14.0 main --version 0.15.0 ``` Or two include only things between two releases: ``` python /path/to/generate_release_notes.py v.14.2 v0.14.3 --version 0.14.3 ``` You should probably redirect the output with: ``` python /path/to/generate_release_notes.py [args] | tee release_notes.md ``` You'll require PyGitHub and tqdm, which you can install with: ``` pip install PyGithub>=1.44.1 twine>=3.1.1 tqdm ``` References https://github.com/scikit-image/scikit-image/blob/master/tools/generate_release_notes.py https://github.com/scikit-image/scikit-image/issues/3404 https://github.com/scikit-image/scikit-image/issues/3405 """ import os import argparse from datetime import datetime from collections import OrderedDict from warnings import warn from github import Github try: from tqdm import tqdm except ImportError: warn( 'tqdm not installed. This script takes approximately 5 minutes ' 'to run. To view live progressbars, please install tqdm. ' 'Otherwise, be patient.' ) def tqdm(i, **kwargs): return i GH = "https://github.com" GH_USER = 'dask' GH_REPO = 'dask-image' GH_TOKEN = os.environ.get('GH_TOKEN') if GH_TOKEN is None: raise RuntimeError( "It is necessary that the environment variable `GH_TOKEN` " "be set to avoid running into problems with rate limiting. " "One can be acquired at https://github.com/settings/tokens.\n\n" "You do not need to select any permission boxes while generating " "the token." ) g = Github(GH_TOKEN) repository = g.get_repo(f'{GH_USER}/{GH_REPO}') parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument('from_commit', help='The starting tag.') parser.add_argument('to_commit', help='The head branch.') parser.add_argument( '--version', help="Version you're about to release.", default='0.2.0' ) args = parser.parse_args() for tag in repository.get_tags(): if tag.name == args.from_commit: previous_tag = tag break else: raise RuntimeError(f'Desired tag ({args.from_commit}) not found') # For some reason, go get the github commit from the commit to get # the correct date github_commit = previous_tag.commit.commit previous_tag_date = datetime.strptime( github_commit.last_modified, '%a, %d %b %Y %H:%M:%S %Z' ) all_commits = list( tqdm( repository.get_commits(sha=args.to_commit, since=previous_tag_date), desc=f'Getting all commits between {args.from_commit} ' f'and {args.to_commit}', ) ) all_hashes = set(c.sha for c in all_commits) def add_to_users(users, new_user): if new_user.name is None: users[new_user.login] = new_user.login else: users[new_user.login] = new_user.name authors = set() committers = set() reviewers = set() users = {} for commit in tqdm(all_commits, desc="Getting commiters and authors"): if commit.committer is not None: add_to_users(users, commit.committer) committers.add(commit.committer.login) if commit.author is not None: add_to_users(users, commit.author) authors.add(commit.author.login) # remove these bots. committers.discard("web-flow") authors.discard("azure-pipelines-bot") highlights = OrderedDict() highlights['Highlights'] = {} highlights['New Features'] = {} highlights['Improvements'] = {} highlights['Bug Fixes'] = {} highlights['API Changes'] = {} highlights['Deprecations'] = {} highlights['Build Tools'] = {} other_pull_requests = {} for pull in tqdm( g.search_issues( f'repo:{GH_USER}/{GH_REPO} ' f'is:pull-request ' f'merged:>{previous_tag_date.isoformat()} ' 'sort:created-asc' ), desc='Pull Requests...', ): pr = repository.get_pull(pull.number) if pr.merge_commit_sha in all_hashes: summary = pull.title for review in pr.get_reviews(): if review.user is not None: add_to_users(users, review.user) reviewers.add(review.user.login) for key, key_dict in highlights.items(): pr_title_prefix = (key + ': ').lower() if summary.lower().startswith(pr_title_prefix): key_dict[pull.number] = { 'summary': summary[len(pr_title_prefix):] } break else: other_pull_requests[pull.number] = {'summary': summary} # add Other PRs to the ordered dict to make doc generation easier. highlights['Other Pull Requests'] = other_pull_requests # Now generate the release notes title = (f'{args.version} ({datetime.today().strftime("%Y-%m-%d")})') title += '\n' + '-' * len(title) # title underline of same length as title print(title) print( f""" We're pleased to announce the release of dask-image {args.version}! """ ) for section, pull_request_dicts in highlights.items(): print(f'{section}\n') if len(pull_request_dicts.items()) == 0: print() for number, pull_request_info in pull_request_dicts.items(): print(f'* {pull_request_info["summary"]} (#{number})') contributors = OrderedDict() contributors['authors'] = authors contributors['reviewers'] = reviewers # ignore committers # contributors['committers'] = committers for section_name, contributor_set in contributors.items(): print() if None in contributor_set: contributor_set.remove(None) committer_str = ( f'{len(contributor_set)} {section_name} added to this ' 'release (alphabetical)' ) print(committer_str) print() for c in sorted(contributor_set, key=lambda x: users[x].lower()): commit_link = f"{GH}/{GH_USER}/{GH_REPO}/commits?author={c}" print(f"* `{users[c]} <{commit_link}>`_ - @{c}") print() ================================================ FILE: docs/release/release_guide.rst ================================================ ============= Release Guide ============= This guide documents the ``dask-image`` release process. It is based on the ``napari`` release guide created by Kira Evans. This guide is primarily intended for core developers of `dask-image`. They will need to have a `PyPI `_ account with upload permissions to the ``dask-image`` package. They will also need permissions to merge pull requests in the ``dask-image`` conda-forge feedstock repository: https://github.com/conda-forge/dask-image-feedstock. You will also need these additional release dependencies to complete the release process: .. code-block:: bash pip install "PyGithub>=1.44.1" "twine>=3.1.1" tqdm Set PyPI password as GitHub secret ---------------------------------- The `dask/dask-image` repository must have a PyPI API token as a GitHub secret. This likely has been done already, but if it has not, follow `this guide `_ to gain a token and `this other guide `_ to add it as a secret. Determining the new version number ---------------------------------- We use `calendar versioning (CalVer) `_ for `dask-image`. This means version numbers have the format `YYYY.MM.X`. Here, YYYY indicates the year, MM indicates the month, and X is an integer counter beginning at zero (to distinguish between cases where multiple releases were made in the same month). `setuptools-scm `_ then determines the exact version from the latest `git tag `_ beginning with `v`. So our git tags will have the format `vYYYY.MM.X`. So for example, a git tag "v2030.01.0" will be the first release made in the month of January, in the year 2030. Generate the release notes -------------------------- The release notes contain a list of merges, contributors, and reviewers. 1. Create a GH_TOKEN environment variable on your computer. On Linux/Mac: .. code-block:: bash export GH_TOKEN= On Windows: .. code-block:: set GH_TOKEN If you don't already have a `personal GitHub API token `_, you can create one from the developer settings of your GitHub account: ``_ 2. Run the python script to generate the release notes, including all changes since the last tagged release. Note: The PyGithub package must be installed to run this script (https://github.com/PyGithub/PyGithub) Call the script like this: .. code-block:: bash python docs/release/generate_release_notes.py main --version An example: .. code-block:: bash python docs/release/generate_release_notes.py v2021.05.24 main --version 2021.06.03 See help for this script with: .. code-block:: bash python docs/release/generate_release_notes.py -h 3. Scan the PR titles for highlights, deprecations, API changes, and bugfixes, and mention these in the relevant sections of the notes. Try to present the information in an expressive way by mentioning the affected functions, elaborating on the changes and their consequences. If possible, organize semantically close PRs in groups. 4. Copy your edited release notes into the file ``HISTORY.rst``. 5. Make and merge a PR with the release notes before moving onto the next steps. Create the release candidate ----------------------------- Go to the dask-image releases page: https://github.com/dask/dask-image/releases Click the "Draft Release" button to create a new release candidate. - Both the tag version and release title should have the format ``vYYYY.MM.Xrc1``. - Copy-paste the release notes from ``HISTORY.rst`` for this release into the description text box. - Tick "Set as a pre-release" Note here how we are using ``rc`` for release candidate to create a version of our release we can test before making the real release. Creating the release will trigger a GitHub actions script, which automatically uploads the release to PyPI. Testing the release candidate ----------------------------- The release candidate can then be tested with .. code-block:: bash pip install --pre dask-image It is recommended that the release candidate is tested in a virtual environment in order to isolate dependencies. If the release candidate is not what you want, make your changes and repeat the process from the beginning but incrementing the number after ``rc`` (e.g. ``vYYYY.MM.Xrc1``). Once you are satisfied with the release candidate it is time to generate the actual release. Generating the actual release ----------------------------- To generate the actual release you will now repeat the processes above but now - dropping the ``rc`` suffix from the version number. - ticking "Set as the latest release" This will automatically upload the release to PyPI, and will also automatically begin the process to release the new version on conda-forge. Releasing on conda-forge ------------------------ It usually takes about an hour or so for the conda-forge bot ``regro-cf-autotick-bot`` to see that there is a new release available on PyPI, and open a pull request in the ``dask-image`` conda-forge feedstock here: https://github.com/conda-forge/dask-image-feedstock Note: the conda-forge bot will not open a PR for any of the release candidates, only for the final release. Only one PR is opened for As an alternative to waiting for the conda-forge bot to notice the new release, you can submit a new dask-image feedstock issue indicating ``@conda-forge-admin, please update version`` in the issue title. This will `trigger `_` the bot to check for new versions. Before merging the pull request, first you should check: * That all the tests have passed on CI for this pull request * If any dependencies were changed, and should be updated by commiting changes to ``recipe/meta.yaml`` to the pull request Once that all looks good you can merge the pull request, and the newest version of ``dask-image`` will automatically be made available on conda-forge. We're finished! ================================================ FILE: pyproject.toml ================================================ [build-system] requires = ["setuptools>=64", "setuptools_scm>=8"] build-backend = "setuptools.build_meta" [project] name = "dask-image" authors = [{name="dask-image contributors. see https://github.com/dask/dask-image/graphs/contributors"}] # noqa: E501 description = "Distributed image processing" keywords = ["dask-image", "dask", "image"] readme = "README.rst" license = { text = "BSD-3-Clause" } dynamic = ["version"] requires-python = ">=3.9" classifiers = [ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", ] dependencies = [ "dask[array] >=2024.4.1", "numpy >=1.18", "scipy >=1.7.0", "pims >=0.4.1", "tifffile >=2020.10.1", ] [project.optional-dependencies] dataframe = [ "dask[dataframe] >=2024.4.1", "pandas >=2.0.0", ] test = [ "build >=1.2.1", "coverage >=7.2.1", "flake8 >=6.0.0", "Flake8-pyproject", "pytest >=7.2.2", "pytest-cov >=4.0.0", "pytest-flake8 >=1.1.1", "pytest-timeout >=2.3.1", "twine >=3.1.1", ] gpu = [ "cupy >=9.0.0", ] [project.urls] "Homepage" = "https://image.dask.org" "Issue Tracker" = "https://github.com/dask/dask-image/issues" "Source Code" = "https://github.com/dask/dask-image" [tool.setuptools_scm] version_scheme = "no-guess-dev" version_file = "dask_image/_version.py" [tool.setuptools] include-package-data = true zip-safe = false license-files = [ "LICENSE.txt", ] [tool.setuptools.packages.find] include = [ "dask_image*", ] [tool.pytest.ini_options] addopts = "--flake8" markers = "cupy" [tool.flake8] exclude = ["dask_image/_version.py"] ================================================ FILE: tests/__init__.py ================================================ # -*- coding: utf-8 -*- ================================================ FILE: tests/test_dask_image/test_imread/__init__.py ================================================ # -*- coding: utf-8 -*- ================================================ FILE: tests/test_dask_image/test_imread/test_core.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import numbers import pathlib import pytest import numpy as np import tifffile import dask.array as da import dask_image.imread @pytest.mark.parametrize( "err_type, nframes", [ (ValueError, 1.0), (ValueError, 0), (ValueError, -2), ] ) def test_errs_imread(err_type, nframes): with pytest.raises(err_type): dask_image.imread.imread("test.tiff", nframes=nframes) @pytest.mark.parametrize( "seed", [ 0, 1, ] ) @pytest.mark.parametrize( "nframes, shape", [ (1, (1, 4, 3)), (-1, (1, 4, 3)), (3, (1, 4, 3)), (1, (5, 4, 3)), (2, (5, 4, 3)), (1, (10, 5, 4, 3)), (5, (10, 5, 4, 3)), (10, (10, 5, 4, 3)), (-1, (10, 5, 4, 3)), ] ) @pytest.mark.parametrize( "dtype", [ np.int16, np.int32, np.float32, ] ) @pytest.mark.parametrize( "is_pathlib_Path", [ True, False, ] ) def test_tiff_imread(tmpdir, seed, nframes, shape, dtype, is_pathlib_Path): # noqa: E501 np.random.seed(seed) dirpth = tmpdir.mkdir("test_imread") dtype = np.dtype(dtype).type low, high = 0.0, 1.0 if isinstance(dtype, numbers.Integral): low, high = np.iinfo(dtype).min, np.iinfo(dtype).max a = np.random.uniform(low=low, high=high, size=shape).astype(dtype) fn = str(dirpth.join("test.tiff")) with tifffile.TiffWriter(fn) as fh: for i in range(len(a)): fh.write(a[i], contiguous=True) if is_pathlib_Path: fn = pathlib.Path(fn) d = dask_image.imread.imread(fn, nframes=nframes) if nframes == -1: nframes = shape[0] assert min(nframes, shape[0]) == max(d.chunks[0]) if shape[0] % nframes == 0: assert nframes == d.chunks[0][-1] else: assert (shape[0] % nframes) == d.chunks[0][-1] da.utils.assert_eq(a, d) def test_tiff_imread_glob_natural_sort(tmpdir): dirpth = tmpdir.mkdir("test_imread") tifffile.imwrite(dirpth.join("10.tif"), np.array([10])) tifffile.imwrite(dirpth.join("9.tif"), np.array([9])) actual = np.array(dask_image.imread.imread(dirpth.join("*.tif"))) assert np.all(actual == np.array([[9], [10]])) ================================================ FILE: tests/test_dask_image/test_imread/test_cupy_imread.py ================================================ import numpy as np import tifffile import pytest import dask_image.imread cupy = pytest.importorskip("cupy", minversion="6.0.0") @pytest.mark.cupy def test_cupy_imread(tmp_path): a = np.random.uniform(low=0.0, high=1.0, size=(1, 4, 3)).astype(np.float32) fn = str(tmp_path/"test.tiff") with tifffile.TiffWriter(fn) as fh: for i in range(len(a)): fh.write(a[i]) result = dask_image.imread.imread(fn, arraytype="cupy") assert type(result._meta) is cupy.ndarray assert type(result.compute()) == cupy.ndarray ================================================ FILE: tests/test_dask_image/test_ndfilters/__init__.py ================================================ # -*- coding: utf-8 -*- ================================================ FILE: tests/test_dask_image/test_ndfilters/test__conv.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndfilters @pytest.mark.parametrize( "da_func", [ (dask_image.ndfilters.convolve), (dask_image.ndfilters.correlate), ] ) @pytest.mark.parametrize( "err_type, weights, origin", [ (ValueError, np.ones((1,)), 0), (ValueError, np.ones((1, 0)), 0), (RuntimeError, np.ones((1, 1)), (0,)), (RuntimeError, np.ones((1, 1)), [(0,)]), (ValueError, np.ones((1, 1)), 1), (TypeError, np.ones((1, 1)), 0.0), (TypeError, np.ones((1, 1)), (0.0, 0.0)), (TypeError, np.ones((1, 1)), 1+0j), (TypeError, np.ones((1, 1)), (0+0j, 1+0j)), ] ) def test_convolutions_params(da_func, err_type, weights, origin): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) with pytest.raises(err_type): da_func(d, weights, origin=origin) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.convolve, dask_image.ndfilters.correlate, ] ) def test_convolutions_shape_type(da_func): weights = np.ones((1, 1)) a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) assert all([(type(s) is int) for s in d.shape]) d2 = da_func(d, weights) assert all([(type(s) is int) for s in d2.shape]) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.convolve, dask_image.ndfilters.correlate, ] ) def test_convolutions_comprehensions(da_func): np.random.seed(0) a = np.random.random((3, 12, 14)) d = da.from_array(a, chunks=(3, 6, 7)) weights = np.ones((1, 1)) l2s = [da_func(d[i], weights) for i in range(len(d))] l2c = [da_func(d[i], weights)[None] for i in range(len(d))] da.utils.assert_eq(np.stack(l2s), da.stack(l2s)) da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c)) @pytest.mark.parametrize( "sp_func, da_func", [ (scipy.ndimage.convolve, dask_image.ndfilters.convolve), (scipy.ndimage.correlate, dask_image.ndfilters.correlate), ] ) @pytest.mark.parametrize( "weights", [ np.ones((1, 1)), ] ) def test_convolutions_identity(sp_func, da_func, weights): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) da.utils.assert_eq( d, da_func(d, weights) ) da.utils.assert_eq( sp_func(a, weights), da_func(d, weights) ) @pytest.mark.parametrize( "sp_func, da_func", [ (scipy.ndimage.convolve, dask_image.ndfilters.convolve), (scipy.ndimage.correlate, dask_image.ndfilters.correlate), ] ) @pytest.mark.parametrize( "weights, origin", [ (np.ones((2, 2)), 0), (np.ones((2, 3)), 0), (np.ones((2, 3)), (0, 1)), (np.ones((2, 3)), (0, -1)), ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0), ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)), ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)), (np.ones((5, 5)), 0), (np.ones((7, 7)), 0), (np.ones((8, 8)), 0), (np.ones((10, 10)), 0), (np.ones((5, 5)), 2), (np.ones((5, 5)), -2), ] ) def test_convolutions_compare(sp_func, da_func, weights, origin): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) da.utils.assert_eq( sp_func( a, weights, origin=origin ), da_func( d, weights, origin=origin ) ) @pytest.mark.parametrize( "sp_func, da_func", [ (scipy.ndimage.convolve, dask_image.ndfilters.convolve), (scipy.ndimage.correlate, dask_image.ndfilters.correlate), ] ) @pytest.mark.parametrize( "weights", [ np.ones((1, 5)), np.ones((5, 1)), ] ) @pytest.mark.parametrize( "mode", ["reflect", "wrap", "nearest", "constant", "mirror"] ) def test_convolutions_modes(sp_func, da_func, weights, mode): a = np.arange(140).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) da.utils.assert_eq( sp_func( a, weights, mode=mode ), da_func( d, weights, mode=mode ) ) ================================================ FILE: tests/test_dask_image/test_ndfilters/test__diff.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndfilters def test_laplace_comprehensions(): np.random.seed(0) a = np.random.random((3, 12, 14)) d = da.from_array(a, chunks=(3, 6, 7)) l2s = [dask_image.ndfilters.laplace(d[i]) for i in range(len(d))] l2c = [dask_image.ndfilters.laplace(d[i])[None] for i in range(len(d))] da.utils.assert_eq(np.stack(l2s), da.stack(l2s)) da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c)) def test_laplace_compare(): s = (10, 11, 12) a = np.arange(float(np.prod(s))).reshape(s) d = da.from_array(a, chunks=(5, 5, 6)) da.utils.assert_eq( scipy.ndimage.laplace(a), dask_image.ndfilters.laplace(d) ) ================================================ FILE: tests/test_dask_image/test_ndfilters/test__edge.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndfilters @pytest.mark.parametrize( "err_type, axis", [ (ValueError, 0.0), (ValueError, 2), (ValueError, -3), ] ) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.prewitt, dask_image.ndfilters.sobel, ] ) def test_edge_func_params(da_func, err_type, axis): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) with pytest.raises(err_type): da_func(d, axis) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.prewitt, dask_image.ndfilters.sobel, ] ) def test_edge_comprehensions(da_func): np.random.seed(0) a = np.random.random((3, 12, 14)) d = da.from_array(a, chunks=(3, 6, 7)) l2s = [da_func(d[i]) for i in range(len(d))] l2c = [da_func(d[i])[None] for i in range(len(d))] da.utils.assert_eq(np.stack(l2s), da.stack(l2s)) da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c)) @pytest.mark.parametrize( "axis", [ 0, 1, 2, -1, -2, -3, ] ) @pytest.mark.parametrize( "da_func, sp_func", [ (dask_image.ndfilters.prewitt, scipy.ndimage.prewitt), (dask_image.ndfilters.sobel, scipy.ndimage.sobel), ] ) def test_edge_func_compare(da_func, sp_func, axis): s = (10, 11, 12) a = np.arange(float(np.prod(s))).reshape(s) d = da.from_array(a, chunks=(5, 5, 6)) da.utils.assert_eq( sp_func(a, axis), da_func(d, axis) ) ================================================ FILE: tests/test_dask_image/test_ndfilters/test__gaussian.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndfilters @pytest.mark.parametrize( "err_type, sigma, truncate", [ (RuntimeError, [[1.0]], 4.0), (RuntimeError, [1.0], 4.0), (TypeError, 1.0 + 0.0j, 4.0), (TypeError, 1.0, 4.0 + 0.0j), ] ) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.gaussian, dask_image.ndfilters.gaussian_filter, dask_image.ndfilters.gaussian_gradient_magnitude, dask_image.ndfilters.gaussian_laplace, ] ) def test_gaussian_filters_params(da_func, err_type, sigma, truncate): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) with pytest.raises(err_type): da_func(d, sigma, truncate=truncate) @pytest.mark.parametrize( "sigma, truncate", [ (0.0, 0.0), (0.0, 1.0), (0.0, 4.0), (1.0, 0.0), ] ) @pytest.mark.parametrize( "order", [0, 1, 2, 3] ) @pytest.mark.parametrize( "sp_func, da_func", [ (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian), (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian_filter), ] ) def test_gaussian_filters_identity(sp_func, da_func, order, sigma, truncate): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) if order % 2 == 1 and sigma != 0 and truncate == 0: pytest.skip( "SciPy zeros the result of a Gaussian filter with odd derivatives" " when sigma is non-zero, truncate is zero, and derivative is odd." "\n\nxref: https://github.com/scipy/scipy/issues/7364" ) da.utils.assert_eq( d, da_func(d, sigma, order, truncate=truncate) ) da.utils.assert_eq( sp_func(a, sigma, order, truncate=truncate), da_func(d, sigma, order, truncate=truncate) ) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.gaussian, dask_image.ndfilters.gaussian_filter, dask_image.ndfilters.gaussian_gradient_magnitude, dask_image.ndfilters.gaussian_laplace, ] ) def test_gaussian_filter_shape_type(da_func): sigma = 1.0 truncate = 4.0 a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) assert all([(type(s) is int) for s in d.shape]) d2 = da_func(d, sigma=sigma, truncate=truncate) assert all([(type(s) is int) for s in d2.shape]) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.gaussian, dask_image.ndfilters.gaussian_filter, dask_image.ndfilters.gaussian_gradient_magnitude, dask_image.ndfilters.gaussian_laplace, ] ) def test_gaussian_filter_comprehensions(da_func): da_wfunc = lambda arr: da_func(arr, 1.0, truncate=4.0) # noqa: E731 np.random.seed(0) a = np.random.random((3, 12, 14)) d = da.from_array(a, chunks=(3, 6, 7)) l2s = [da_wfunc(d[i]) for i in range(len(d))] l2c = [da_wfunc(d[i])[None] for i in range(len(d))] da.utils.assert_eq(np.stack(l2s), da.stack(l2s)) da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c)) @pytest.mark.parametrize( "sigma, truncate", [ (1.0, 2.0), (1.0, 4.0), (2.0, 2.0), (2.0, 4.0), ((1.0, 2.0), 4.0), ] ) @pytest.mark.parametrize( "sp_func, da_func", [ (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian), (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian_filter), (scipy.ndimage.gaussian_gradient_magnitude, dask_image.ndfilters.gaussian_gradient_magnitude), (scipy.ndimage.gaussian_laplace, dask_image.ndfilters.gaussian_laplace), ] ) def test_gaussian_filters_compare(sp_func, da_func, sigma, truncate): s = (100, 110) a = np.arange(float(np.prod(s))).reshape(s) d = da.from_array(a, chunks=(50, 55)) da.utils.assert_eq( sp_func(a, sigma, truncate=truncate), da_func(d, sigma, truncate=truncate) ) @pytest.mark.parametrize( "sigma, truncate", [ (0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 2.0), (1.0, 4.0), (2.0, 2.0), (2.0, 4.0), ((1.0, 2.0), 4.0), ] ) @pytest.mark.parametrize( "order", [ 0, 1, 2, 3, (0, 1), (2, 3), ] ) @pytest.mark.parametrize( "sp_func, da_func", [ (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian), (scipy.ndimage.gaussian_filter, dask_image.ndfilters.gaussian_filter), ] ) def test_gaussian_derivative_filters_compare(sp_func, da_func, order, sigma, truncate): s = (100, 110) a = np.arange(float(np.prod(s))).reshape(s) d = da.from_array(a, chunks=(50, 55)) da.utils.assert_eq( sp_func(a, sigma, order, truncate=truncate), da_func(d, sigma, order, truncate=truncate) ) ================================================ FILE: tests/test_dask_image/test_ndfilters/test__generic.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndfilters @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.generic_filter, ], ) @pytest.mark.parametrize( "err_type, function, size, footprint, origin", [ (RuntimeError, lambda x: x, None, None, 0), (TypeError, lambda x: x, 1.0, None, 0), (RuntimeError, lambda x: x, (1,), None, 0), (RuntimeError, lambda x: x, [(1,)], None, 0), (RuntimeError, lambda x: x, 1, np.ones((1,)), 0), (RuntimeError, lambda x: x, None, np.ones((1,)), 0), (RuntimeError, lambda x: x, None, np.ones((1, 0)), 0), (RuntimeError, lambda x: x, 1, None, (0,)), (RuntimeError, lambda x: x, 1, None, [(0,)]), (ValueError, lambda x: x, 1, None, 1), (TypeError, lambda x: x, 1, None, 0.0), (TypeError, lambda x: x, 1, None, (0.0, 0.0)), (TypeError, lambda x: x, 1, None, 1 + 0j), (TypeError, lambda x: x, 1, None, (0 + 0j, 1 + 0j)), ], ) def test_generic_filters_params(da_func, err_type, function, size, footprint, origin): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) with pytest.raises(err_type): da_func(d, function, size=size, footprint=footprint, origin=origin) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.generic_filter, ], ) def test_generic_filter_shape_type(da_func): function = lambda x: x # noqa: E731 size = 1 a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) assert all([(type(s) is int) for s in d.shape]) d2 = da_func(d, function, size=size) assert all([(type(s) is int) for s in d2.shape]) @pytest.mark.parametrize( "sp_func, da_func", [(scipy.ndimage.generic_filter, dask_image.ndfilters.generic_filter)], ) @pytest.mark.parametrize( "function, size, footprint", [ (lambda x: x[0], 1, None), (lambda x: x[0], (1, 1), None), (lambda x: x[0], None, np.ones((1, 1))), ], ) def test_generic_filter_identity(sp_func, da_func, function, size, footprint): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) da.utils.assert_eq(d, da_func(d, function, size=size, footprint=footprint)) da.utils.assert_eq( sp_func(a, function, size=size, footprint=footprint), da_func(d, function, size=size, footprint=footprint), ) @pytest.mark.parametrize( "da_func", [ dask_image.ndfilters.generic_filter, ], ) def test_generic_filter_comprehensions(da_func): da_wfunc = lambda arr: da_func(arr, lambda x: x[0], 1) # noqa: E731 np.random.seed(0) a = np.random.random((3, 12, 14)) d = da.from_array(a, chunks=(3, 6, 7)) l2s = [da_wfunc(d[i]) for i in range(len(d))] l2c = [da_wfunc(d[i])[None] for i in range(len(d))] da.utils.assert_eq(np.stack(l2s), da.stack(l2s)) da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c)) @pytest.mark.parametrize( "sp_func, da_func", [(scipy.ndimage.generic_filter, dask_image.ndfilters.generic_filter)], ) @pytest.mark.parametrize( "function, size, footprint, origin", [ (lambda x: (np.array(x) ** 2).sum(), 2, None, 0), (lambda x: (np.array(x) ** 2).sum(), None, np.ones((2, 3)), 0), (lambda x: (np.array(x) ** 2).sum(), None, np.ones((2, 3)), (0, 1)), (lambda x: (np.array(x) ** 2).sum(), None, np.ones((2, 3)), (0, -1)), ( lambda x: (np.array(x) ** 2).sum(), None, (np.mgrid[-2: 2 + 1, -2: 2 + 1] ** 2).sum(axis=0) < 2.5 ** 2, 0, ), ( lambda x: (np.array(x) ** 2).sum(), None, (np.mgrid[-2: 2 + 1, -2: 2 + 1] ** 2).sum(axis=0) < 2.5 ** 2, (1, 2), ), ( lambda x: (np.array(x) ** 2).sum(), None, (np.mgrid[-2: 2 + 1, -2: 2 + 1] ** 2).sum(axis=0) < 2.5 ** 2, (-1, -2), ), (lambda x: (np.array(x) ** 2).sum(), 5, None, 0), (lambda x: (np.array(x) ** 2).sum(), 7, None, 0), (lambda x: (np.array(x) ** 2).sum(), 8, None, 0), (lambda x: (np.array(x) ** 2).sum(), 10, None, 0), (lambda x: (np.array(x) ** 2).sum(), 5, None, 2), (lambda x: (np.array(x) ** 2).sum(), 5, None, -2), ], ) def test_generic_filter_compare(sp_func, da_func, function, size, footprint, origin): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) da.utils.assert_eq( sp_func(a, function, size=size, footprint=footprint, origin=origin), da_func(d, function, size=size, footprint=footprint, origin=origin), ) ================================================ FILE: tests/test_dask_image/test_ndfilters/test__order.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndfilters @pytest.mark.parametrize( "da_func, extra_kwargs", [ (dask_image.ndfilters.minimum_filter, {}), (dask_image.ndfilters.median_filter, {}), (dask_image.ndfilters.maximum_filter, {}), (dask_image.ndfilters.rank_filter, {"rank": 0}), (dask_image.ndfilters.percentile_filter, {"percentile": 0}), ] ) @pytest.mark.parametrize( "err_type, size, footprint, origin", [ (RuntimeError, None, None, 0), (TypeError, 1.0, None, 0), (RuntimeError, (1,), None, 0), (RuntimeError, [(1,)], None, 0), (RuntimeError, 1, np.ones((1,)), 0), (RuntimeError, None, np.ones((1,)), 0), (RuntimeError, None, np.ones((1, 0)), 0), (RuntimeError, 1, None, (0,)), (RuntimeError, 1, None, [(0,)]), (ValueError, 1, None, 1), (TypeError, 1, None, 0.0), (TypeError, 1, None, (0.0, 0.0)), (TypeError, 1, None, 1+0j), (TypeError, 1, None, (0+0j, 1+0j)), ] ) def test_order_filter_params(da_func, extra_kwargs, err_type, size, footprint, origin): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) with pytest.raises(err_type): da_func(d, size=size, footprint=footprint, origin=origin, **extra_kwargs) @pytest.mark.parametrize( "da_func, extra_kwargs", [ (dask_image.ndfilters.minimum_filter, {}), (dask_image.ndfilters.median_filter, {}), (dask_image.ndfilters.maximum_filter, {}), (dask_image.ndfilters.rank_filter, {"rank": 0}), (dask_image.ndfilters.percentile_filter, {"percentile": 0}), ] ) def test_ordered_filter_shape_type(da_func, extra_kwargs): size = 1 a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) assert all([(type(s) is int) for s in d.shape]) d2 = da_func(d, size=size, **extra_kwargs) assert all([(type(s) is int) for s in d2.shape]) @pytest.mark.parametrize( "sp_func, da_func, extra_kwargs", [ (scipy.ndimage.minimum_filter, dask_image.ndfilters.minimum_filter, {}), (scipy.ndimage.median_filter, dask_image.ndfilters.median_filter, {}), (scipy.ndimage.maximum_filter, dask_image.ndfilters.maximum_filter, {}), (scipy.ndimage.rank_filter, dask_image.ndfilters.rank_filter, {"rank": 0}), (scipy.ndimage.percentile_filter, dask_image.ndfilters.percentile_filter, {"percentile": 0}), ] ) @pytest.mark.parametrize( "size, footprint", [ (1, None), ((1, 1), None), (None, np.ones((1, 1))), ] ) def test_ordered_filter_identity(sp_func, da_func, extra_kwargs, size, footprint): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) da.utils.assert_eq( d, da_func(d, size=size, footprint=footprint, **extra_kwargs) ) da.utils.assert_eq( sp_func(a, size=size, footprint=footprint, **extra_kwargs), da_func(d, size=size, footprint=footprint, **extra_kwargs) ) @pytest.mark.parametrize( "da_func, kwargs", [ (dask_image.ndfilters.minimum_filter, {"size": 1}), (dask_image.ndfilters.median_filter, {"size": 1}), (dask_image.ndfilters.maximum_filter, {"size": 1}), (dask_image.ndfilters.rank_filter, {"size": 1, "rank": 0}), (dask_image.ndfilters.percentile_filter, {"size": 1, "percentile": 0}), ] ) def test_order_comprehensions(da_func, kwargs): np.random.seed(0) a = np.random.random((3, 12, 14)) d = da.from_array(a, chunks=(3, 6, 7)) l2s = [da_func(d[i], **kwargs) for i in range(len(d))] l2c = [da_func(d[i], **kwargs)[None] for i in range(len(d))] da.utils.assert_eq(np.stack(l2s), da.stack(l2s)) da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c)) @pytest.mark.parametrize( "sp_func, da_func, extra_kwargs", [ (scipy.ndimage.minimum_filter, dask_image.ndfilters.minimum_filter, {}), (scipy.ndimage.median_filter, dask_image.ndfilters.median_filter, {}), (scipy.ndimage.maximum_filter, dask_image.ndfilters.maximum_filter, {}), (scipy.ndimage.rank_filter, dask_image.ndfilters.rank_filter, {"rank": 1}), (scipy.ndimage.percentile_filter, dask_image.ndfilters.percentile_filter, {"percentile": 10}), ] ) @pytest.mark.parametrize( "size, footprint, origin", [ (2, None, 0), (None, np.ones((2, 3)), 0), (None, np.ones((2, 3)), (0, 1)), (None, np.ones((2, 3)), (0, -1)), (None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0), (None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)), (None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)), (5, None, 0), (7, None, 0), (8, None, 0), (10, None, 0), (5, None, 2), (5, None, -2), ] ) def test_ordered_filter_compare(sp_func, da_func, extra_kwargs, size, footprint, origin): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) da.utils.assert_eq( sp_func( a, size=size, footprint=footprint, origin=origin, **extra_kwargs ), da_func( d, size=size, footprint=footprint, origin=origin, **extra_kwargs ) ) ================================================ FILE: tests/test_dask_image/test_ndfilters/test__smooth.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndfilters @pytest.mark.parametrize( "err_type, size, origin", [ (TypeError, 3.0, 0), (TypeError, 3, 0.0), (RuntimeError, [3], 0), (RuntimeError, 3, [0]), (RuntimeError, [[3]], 0), (RuntimeError, 3, [[0]]), ] ) def test_uniform_filter_params(err_type, size, origin): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) with pytest.raises(err_type): dask_image.ndfilters.uniform_filter(d, size, origin=origin) def test_uniform_shape_type(): size = 1 origin = 0 a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) assert all([(type(s) is int) for s in d.shape]) d2 = dask_image.ndfilters.uniform_filter(d, size, origin=origin) assert all([(type(s) is int) for s in d2.shape]) def test_uniform_comprehensions(): da_func = lambda arr: dask_image.ndfilters.uniform_filter(arr, 1, origin=0) # noqa: E731, E501 np.random.seed(0) a = np.random.random((3, 12, 14)) d = da.from_array(a, chunks=(3, 6, 7)) l2s = [da_func(d[i]) for i in range(len(d))] l2c = [da_func(d[i])[None] for i in range(len(d))] da.utils.assert_eq(np.stack(l2s), da.stack(l2s)) da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c)) @pytest.mark.parametrize( "size, origin", [ (1, 0), ] ) def test_uniform_identity(size, origin): a = np.arange(140.0).reshape(10, 14) d = da.from_array(a, chunks=(5, 7)) da.utils.assert_eq( d, dask_image.ndfilters.uniform_filter(d, size, origin=origin) ) da.utils.assert_eq( scipy.ndimage.uniform_filter(a, size, origin=origin), dask_image.ndfilters.uniform_filter(d, size, origin=origin) ) @pytest.mark.parametrize( "size, origin", [ (2, 0), (3, 0), (3, 1), (3, (1, 0)), ((1, 2), 0), ((3, 2), (1, 0)), ] ) def test_uniform_compare(size, origin): s = (100, 110) a = np.arange(float(np.prod(s))).reshape(s) d = da.from_array(a, chunks=(50, 55)) da.utils.assert_eq( scipy.ndimage.uniform_filter(a, size, origin=origin), dask_image.ndfilters.uniform_filter(d, size, origin=origin) ) ================================================ FILE: tests/test_dask_image/test_ndfilters/test__threshold.py ================================================ import dask.array as da import numpy as np from numpy.testing import assert_equal import pytest from dask_image.ndfilters import threshold_local @pytest.fixture def simple_test_image(): image = da.from_array(np.array( [[0, 0, 1, 3, 5], [0, 1, 4, 3, 4], [1, 2, 5, 4, 1], [2, 4, 5, 2, 1], [4, 5, 1, 0, 0]], dtype=int), chunks=(5, 5)) return image # ================================================== # Test Threshold Filters # ================================================== @pytest.mark.parametrize('block_size', [ 3, [3, 3], np.array([3, 3]), da.from_array(np.array([3, 3]), chunks=1), da.from_array(np.array([3, 3]), chunks=2), ]) def test_threshold_local_gaussian(simple_test_image, block_size): ref = np.array( [[False, False, False, False, True], [False, False, True, False, True], [False, False, True, True, False], [False, True, True, False, False], [True, True, False, False, False]] ) out = threshold_local(simple_test_image, block_size, method='gaussian') assert_equal(ref, (simple_test_image > out).compute()) out = threshold_local( simple_test_image, block_size, method='gaussian', param=1./3. ) assert_equal(ref, (simple_test_image > out).compute()) @pytest.mark.parametrize('block_size', [ 3, [3, 3], np.array([3, 3]), da.from_array(np.array([3, 3]), chunks=1), da.from_array(np.array([3, 3]), chunks=2), ]) def test_threshold_local_mean(simple_test_image, block_size): ref = np.array( [[False, False, False, False, True], [False, False, True, False, True], [False, False, True, True, False], [False, True, True, False, False], [True, True, False, False, False]] ) out = threshold_local(simple_test_image, block_size, method='mean') assert_equal(ref, (simple_test_image > out).compute()) @pytest.mark.parametrize('block_size', [ 3, [3, 3], np.array([3, 3]), da.from_array(np.array([3, 3]), chunks=1), da.from_array(np.array([3, 3]), chunks=2), ]) def test_threshold_local_median(simple_test_image, block_size): ref = np.array( [[False, False, False, False, True], [False, False, True, False, False], [False, False, True, False, False], [False, False, True, True, False], [False, True, False, False, False]] ) out = threshold_local(simple_test_image, block_size, method='median') assert_equal(ref, (simple_test_image > out).compute()) # ================================================== # Test Generic Filters # ================================================== def test_threshold_local_generic(simple_test_image): ref = np.array( [[1., 7., 16., 29., 37.], [5., 14., 23., 30., 30.], [13., 24., 30., 29., 21.], [25., 29., 28., 19., 10.], [34., 31., 23., 10., 4.]] ) unchanged = threshold_local( simple_test_image, 1, method='generic', param=sum ) out = threshold_local(simple_test_image, 3, method='generic', param=sum) assert np.allclose(unchanged.compute(), simple_test_image.compute()) assert np.allclose(out.compute(), ref) def test_threshold_local_generic_invalid(simple_test_image): expected_error_message = "Must include a valid function to use as " "the 'param' keyword argument." with pytest.raises(ValueError) as e: threshold_local(simple_test_image, 3, method='generic', param='sum') assert e == expected_error_message # ================================================== # Test Invalid Arguments # ================================================== @pytest.mark.parametrize("method, block_size, error_type", [ ('median', np.nan, TypeError), ]) def test_nan_blocksize(simple_test_image, method, block_size, error_type): with pytest.raises(error_type): threshold_local(simple_test_image, block_size, method=method) def test_invalid_threshold_method(simple_test_image): with pytest.raises(ValueError): threshold_local(simple_test_image, 3, method='invalid') ================================================ FILE: tests/test_dask_image/test_ndfilters/test__utils.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import inspect import pytest import numpy as np from dask_image.ndfilters import _utils def test__get_docstring(): f = lambda: 0 # noqa: E731 result = _utils._get_docstring(f) expected = """ Wrapped copy of "{mod_name}.{func_name}" Excludes the output parameter as it would not work with Dask arrays. Original docstring: {doc} """.format( mod_name=inspect.getmodule(f).__name__, func_name=f.__name__, doc="", ) assert result == expected def test__update_wrapper(): f = lambda: 0 # noqa: E731 @_utils._update_wrapper(f) def g(): return f() assert f.__name__ == g.__name__ expected = """ Wrapped copy of "{mod_name}.{func_name}" Excludes the output parameter as it would not work with Dask arrays. Original docstring: {doc} """.format( mod_name=inspect.getmodule(g).__name__, func_name=g.__name__, doc="", ) assert g.__doc__ == expected @pytest.mark.parametrize( "err_type, ndim, depth, boundary", [ (TypeError, lambda: 0, 1, None), (TypeError, 1.0, 1, None), (ValueError, -1, 1, None), (TypeError, 1, lambda: 0, None), (TypeError, 1, 1.0, None), (ValueError, 1, -1, None), (ValueError, 1, (1, 1), None), (ValueError, 1, {0: 1, 1: 1}, None), (TypeError, 1, {1}, None), (TypeError, 1, 1, 1), (ValueError, 1, 1, (None, None)), (ValueError, 1, 1, {0: None, 1: None}), (TypeError, 1, 1, (1,)), (TypeError, 1, 1, {1}), ] ) def test_errs__get_depth_boundary(err_type, ndim, depth, boundary): with pytest.raises(err_type): _utils._get_depth_boundary(ndim, depth, boundary) @pytest.mark.parametrize( "err_type, ndim, size", [ (TypeError, 1.0, 1), (RuntimeError, 1, [[1]]), (TypeError, 1, 1.0), (TypeError, 1, [1.0]), (RuntimeError, 1, [1, 1]), ] ) def test_errs__get_size(err_type, ndim, size): with pytest.raises(err_type): _utils._get_size(ndim, size) @pytest.mark.parametrize( "err_type, size, origin", [ (TypeError, [1], 1.0), (TypeError, [1], [1.0]), (RuntimeError, [1], [[1]]), (RuntimeError, [1], [1, 1]), (ValueError, [1], [2]), ] ) def test_errs__get_origin(err_type, size, origin): with pytest.raises(err_type): _utils._get_origin(size, origin) @pytest.mark.parametrize( "err_type, ndim, size, footprint", [ (RuntimeError, 1, None, None), (RuntimeError, 1, [2], np.ones((2,), dtype=bool)), (RuntimeError, 1, None, np.ones((1, 2), dtype=bool)), (RuntimeError, 1, None, np.ones([0], dtype=bool)), ] ) def test_errs__get_footprint(err_type, ndim, size, footprint): with pytest.raises(err_type): _utils._get_footprint(ndim, size=size, footprint=footprint) @pytest.mark.parametrize( "expected, ndim, depth, boundary", [ (({0: 0}, {0: "none"}), 1, 0, "none"), (({0: 0}, {0: "reflect"}), 1, 0, "reflect"), (({0: 0}, {0: "periodic"}), 1, 0, "periodic"), (({0: 1}, {0: "none"}), 1, 1, "none"), ] ) def test__get_depth_boundary(expected, ndim, depth, boundary): assert expected == _utils._get_depth_boundary(ndim, depth, boundary) @pytest.mark.parametrize( "expected, ndim, size", [ ((1,), 1, 1), ((3, 3), 2, 3), ((2, 4), 2, (2, 4)), ] ) def test__get_size(expected, ndim, size): assert expected == _utils._get_size(ndim, size) @pytest.mark.parametrize( "expected, size, origin", [ ((0,), (1,), 0), ((1,), (3,), 1), ((1, 2), (3, 5), (1, 2)), ] ) def test__get_origin(expected, size, origin): assert expected == _utils._get_origin(size, origin) @pytest.mark.parametrize( "expected, size, origin", [ ((0,), (1,), 0), ((1,), (3,), 0), ((2,), (3,), 1), ((2, 4), (3, 5), (1, 2)), ] ) def test__get_depth(expected, size, origin): assert expected == _utils._get_depth(size, origin) @pytest.mark.parametrize( "expected, ndim, size, footprint", [ (np.ones((2,), dtype=bool), 1, 2, None), (np.ones((2,), dtype=bool), 1, None, np.ones((2,), dtype=bool)), ] ) def test__get_footprint(expected, ndim, size, footprint): assert (expected == _utils._get_footprint(ndim, size, footprint)).all() ================================================ FILE: tests/test_dask_image/test_ndfilters/test_cupy_ndfilters.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import dask.array as da import numpy as np import pytest import dask_image.ndfilters cupy = pytest.importorskip("cupy", minversion="8.0.0") @pytest.fixture def array(): s = (10, 10) a = da.from_array(cupy.arange(int(np.prod(s)), dtype=cupy.float32).reshape(s), chunks=5) return a @pytest.mark.cupy @pytest.mark.parametrize("func", [ dask_image.ndfilters.convolve, dask_image.ndfilters.correlate, ]) def test_cupy_conv(array, func): """Test convolve & correlate filters with cupy input arrays.""" weights = cupy.ones(array.ndim * (3,), dtype=cupy.float32) result = func(array, weights) result.compute() @pytest.mark.cupy @pytest.mark.parametrize("func", [ dask_image.ndfilters.laplace, ]) def test_cupy_diff(array, func): result = func(array) result.compute() @pytest.mark.cupy @pytest.mark.parametrize("func", [ dask_image.ndfilters.prewitt, dask_image.ndfilters.sobel, ]) def test_cupy_edge(array, func): result = func(array) result.compute() @pytest.mark.cupy @pytest.mark.parametrize("func", [ dask_image.ndfilters.gaussian, dask_image.ndfilters.gaussian_filter, dask_image.ndfilters.gaussian_gradient_magnitude, dask_image.ndfilters.gaussian_laplace, ]) def test_cupy_gaussian(array, func): sigma = 1 result = func(array, sigma) result.compute() @pytest.mark.parametrize( "size, footprint", [ (1, None), ((1, 1), None), (None, np.ones((1, 1))), ] ) def test_cupy_generic(array, size, footprint): my_sum = cupy.ReductionKernel( 'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum') result = dask_image.ndfilters.generic_filter(array, my_sum, size=size, footprint=footprint) result.compute() @pytest.mark.cupy @pytest.mark.parametrize("func, extra_arg, size", [ (dask_image.ndfilters.minimum_filter, None, 3), (dask_image.ndfilters.median_filter, None, 3), (dask_image.ndfilters.maximum_filter, None, 3), (dask_image.ndfilters.rank_filter, 5, 3), (dask_image.ndfilters.percentile_filter, 50, 3), ]) def test_cupy_order(array, func, extra_arg, size): if extra_arg is not None: result = func(array, extra_arg, size=size) else: result = func(array, size=size) result.compute() @pytest.mark.cupy @pytest.mark.parametrize("func", [ dask_image.ndfilters.uniform_filter, ]) def test_cupy_smooth(array, func): result = func(array) result.compute() ================================================ FILE: tests/test_dask_image/test_ndfilters/test_cupy_threshold.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import dask.array as da import numpy as np import pytest from dask_image.ndfilters import threshold_local cupy = pytest.importorskip("cupy", minversion="5.0.0") @pytest.fixture def simple_test_image(): image = da.from_array(cupy.array( [[0, 0, 1, 3, 5], [0, 1, 4, 3, 4], [1, 2, 5, 4, 1], [2, 4, 5, 2, 1], [4, 5, 1, 0, 0]], dtype=int), chunks=(5, 5)) return image # ================================================== # Test Threshold Filters # ================================================== @pytest.mark.cupy @pytest.mark.parametrize('block_size', [ 3, [3, 3], np.array([3, 3]), da.from_array(np.array([3, 3]), chunks=1), da.from_array(np.array([3, 3]), chunks=2), ]) def test_threshold_local_gaussian(simple_test_image, block_size): ref = np.array( [[False, False, False, False, True], [False, False, True, False, True], [False, False, True, True, False], [False, True, True, False, False], [True, True, False, False, False]] ) out = threshold_local(simple_test_image, block_size, method='gaussian') cupy.testing.assert_array_equal(ref, (simple_test_image > out).compute()) out = threshold_local( simple_test_image, block_size, method='gaussian', param=1./3. ) cupy.testing.assert_array_equal(ref, (simple_test_image > out).compute()) @pytest.mark.cupy @pytest.mark.parametrize('block_size', [ 3, [3, 3], np.array([3, 3]), da.from_array(np.array([3, 3]), chunks=1), da.from_array(np.array([3, 3]), chunks=2), ]) def test_threshold_local_mean(simple_test_image, block_size): ref = cupy.array( [[False, False, False, False, True], [False, False, True, False, True], [False, False, True, True, False], [False, True, True, False, False], [True, True, False, False, False]] ) out = threshold_local(simple_test_image, block_size, method='mean') cupy.testing.assert_array_equal(ref, (simple_test_image > out).compute()) @pytest.mark.cupy @pytest.mark.parametrize('block_size', [ 3, [3, 3], np.array([3, 3]), da.from_array(np.array([3, 3]), chunks=1), da.from_array(np.array([3, 3]), chunks=2), ]) def test_threshold_local_median(simple_test_image, block_size): ref = cupy.array( [[False, False, False, False, True], [False, False, True, False, False], [False, False, True, False, False], [False, False, True, True, False], [False, True, False, False, False]] ) out = threshold_local(simple_test_image, block_size, method='median') cupy.testing.assert_array_equal(ref, (simple_test_image > out).compute()) # ================================================== # Test Generic Filters # ================================================== def test_threshold_local_generic(simple_test_image): ref = cupy.array( [[1., 7., 16., 29., 37.], [5., 14., 23., 30., 30.], [13., 24., 30., 29., 21.], [25., 29., 28., 19., 10.], [34., 31., 23., 10., 4.]] ) my_sum = cupy.ReductionKernel( 'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum') unchanged = threshold_local(simple_test_image, 1, method='generic', param=my_sum) # noqa: E501 out = threshold_local(simple_test_image, 3, method='generic', param=my_sum) assert cupy.allclose(unchanged.compute(), simple_test_image.compute()) assert cupy.allclose(out.compute(), ref) def test_threshold_local_generic_invalid(simple_test_image): expected_error_message = "Must include a valid function to use as " "the 'param' keyword argument." with pytest.raises(ValueError) as e: threshold_local(simple_test_image, 3, method='generic', param='sum') assert e == expected_error_message # ================================================== # Test Invalid Arguments # ================================================== @pytest.mark.parametrize("method, block_size, error_type", [ ('median', cupy.nan, TypeError), ]) def test_nan_blocksize(simple_test_image, method, block_size, error_type): with pytest.raises(error_type): threshold_local(simple_test_image, block_size, method=method) def test_invalid_threshold_method(simple_test_image): with pytest.raises(ValueError): threshold_local(simple_test_image, 3, method='invalid') ================================================ FILE: tests/test_dask_image/test_ndfourier/test__utils.py ================================================ # -*- coding: utf-8 -*- import numbers import pytest import dask.array as da import dask_image.ndfourier._utils @pytest.mark.parametrize( "a, s, n, axis", [ (da.ones((3, 4), chunks=(3, 4)), da.ones((2,), chunks=(2,)), -1, -1), (da.ones((3, 4), dtype='i8', chunks=(3, 4)), da.ones((2,), dtype='i8', chunks=(2,)), -1, -1), ] ) def test_norm_args(a, s, n, axis): a2, s2, n2, axis2 = dask_image.ndfourier._utils._norm_args( a, s, n=n, axis=axis ) assert isinstance(a2, da.Array) assert isinstance(s2, da.Array) assert issubclass(a2.real.dtype.type, numbers.Real) assert issubclass(s2.dtype.type, numbers.Real) ================================================ FILE: tests/test_dask_image/test_ndfourier/test_core.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import numbers import pytest import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndfourier @pytest.mark.parametrize( "err_type, s, n", [ (NotImplementedError, 0.0, 0), (TypeError, 0.0 + 0.0j, 0), (TypeError, {}, 0), (RuntimeError, [0.0], 0), (RuntimeError, [[0.0], [0.0]], 0), (TypeError, [0.0, 0.0 + 0.0j], 0), (NotImplementedError, 0, 0), ] ) @pytest.mark.parametrize( "funcname", [ "fourier_shift", "fourier_gaussian", "fourier_uniform", ] ) def test_fourier_filter_err(funcname, err_type, s, n): da_func = getattr(dask_image.ndfourier, funcname) a = np.arange(140.0).reshape(10, 14).astype(complex) d = da.from_array(a, chunks=(5, 7)) with pytest.raises(err_type): da_func(d, s, n) @pytest.mark.parametrize( "s", [ 0, (0, 0), ] ) @pytest.mark.parametrize( "funcname", [ "fourier_shift", "fourier_gaussian", ] ) def test_fourier_filter_identity(funcname, s): da_func = getattr(dask_image.ndfourier, funcname) sp_func = getattr(scipy.ndimage, funcname) a = np.arange(140.0).reshape(10, 14).astype(complex) d = da.from_array(a, chunks=(5, 7)) r_a = sp_func(a, s) r_d = da_func(d, s) assert d.chunks == r_d.chunks da.utils.assert_eq(d, r_d) da.utils.assert_eq(r_a, r_d) @pytest.mark.parametrize( "dtype", [ np.int64, np.float32, np.float64, np.complex64, np.complex128, ] ) @pytest.mark.parametrize( "funcname, upcast_type", [ ("fourier_shift", numbers.Real), ("fourier_gaussian", numbers.Integral), ("fourier_uniform", numbers.Integral), ] ) def test_fourier_filter_type(funcname, upcast_type, dtype): if ( dtype in [np.int64, np.float64] and funcname in ["fourier_gaussian", "fourier_uniform"] ): pytest.skip( "SciPy 1.0.0+ doesn't handle double precision values correctly." ) dtype = np.dtype(dtype).type s = 1 da_func = getattr(dask_image.ndfourier, funcname) sp_func = getattr(scipy.ndimage, funcname) a = np.arange(140.0).reshape(10, 14).astype(dtype) d = da.from_array(a, chunks=(5, 7)) r_a = sp_func(a, s) r_d = da_func(d, s) assert d.chunks == r_d.chunks da.utils.assert_eq(r_a, r_d) if issubclass(dtype, upcast_type): assert r_d.real.dtype.type is np.float64 else: assert r_d.dtype.type is dtype @pytest.mark.parametrize( "shape, chunks", [ ((10, 14), (10, 14)), ((10, 14), (5, 7)), ((10, 14), (6, 8)), ((10, 14), (4, 6)), ((16,), (3, 6, 2, 5)), ] ) @pytest.mark.parametrize( "funcname", [ "fourier_shift", "fourier_gaussian", "fourier_uniform", ] ) def test_fourier_filter_chunks(funcname, shape, chunks): dtype = np.dtype(complex).type s = 1 da_func = getattr(dask_image.ndfourier, funcname) sp_func = getattr(scipy.ndimage, funcname) a = np.arange(np.prod(shape)).reshape(shape).astype(dtype) d = da.from_array(a, chunks=chunks) r_a = sp_func(a, s) r_d = da_func(d, s) assert d.chunks == r_d.chunks da.utils.assert_eq(r_a, r_d) @pytest.mark.parametrize( "s", [ -1, (-1, -1), (-1, 2), (10, -9), (1, 0), (0, 2), ] ) @pytest.mark.parametrize( "funcname", [ "fourier_shift", "fourier_gaussian", ] ) def test_fourier_filter_non_positive(funcname, s): da_func = getattr(dask_image.ndfourier, funcname) sp_func = getattr(scipy.ndimage, funcname) a = np.arange(140.0).reshape(10, 14).astype(complex) d = da.from_array(a, chunks=(5, 7)) r_a = sp_func(a, s) r_d = da_func(d, s) assert d.chunks == r_d.chunks da.utils.assert_eq(r_a, r_d) @pytest.mark.parametrize( "s", [ 1, 0.5, (1, 1), (0.8, 1.5), np.ones((2,)), da.ones((2,), chunks=(2,)), da.ones((2,), chunks=(1,)), ] ) @pytest.mark.parametrize( "funcname", [ "fourier_shift", "fourier_gaussian", "fourier_uniform", ] ) @pytest.mark.parametrize( "real_fft, axis", [ (True, -1), (True, 0), (False, -1), ] ) def test_fourier_filter(funcname, s, real_fft, axis): da_func = getattr(dask_image.ndfourier, funcname) sp_func = getattr(scipy.ndimage, funcname) shape = (10, 14) n = 2 * shape[axis] - 1 if real_fft else -1 dtype = np.float64 if real_fft else np.complex128 a = np.arange(140.0).reshape(shape).astype(dtype) d = da.from_array(a, chunks=(5, 7)) r_a = sp_func(a, s, n=n, axis=axis) r_d = da_func(d, s, n=n, axis=axis) assert d.chunks == r_d.chunks da.utils.assert_eq(r_a, r_d) ================================================ FILE: tests/test_dask_image/test_ndinterp/test_affine_transformation.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- from packaging import version import dask import dask.array as da import numpy as np import pytest import scipy import scipy.ndimage import dask_image.ndinterp # mode lists for the case with prefilter = False _supported_modes = ['constant', 'nearest'] _unsupported_modes = ['wrap', 'reflect', 'mirror'] # mode lists for the case with prefilter = True _supported_prefilter_modes = ['constant'] _unsupported_prefilter_modes = _unsupported_modes + ['nearest'] have_scipy16 = version.parse(scipy.__version__) >= version.parse('1.6.0') # additional modes are present in SciPy >= 1.6.0 if have_scipy16: _supported_modes += ['grid-constant'] _unsupported_modes += ['grid-mirror', 'grid-wrap'] _unsupported_prefilter_modes += ['grid-constant', 'grid-mirror', 'grid-wrap'] def validate_affine_transform(n=2, matrix=None, offset=None, input_output_shape_per_dim=(16, 16), interp_order=1, interp_mode='constant', input_output_chunksize_per_dim=(6, 6), random_seed=0, use_cupy=False, prefilter=False ): """ Compare the outputs of `scipy.ndimage.affine_transformation` and `dask_image.ndinterp.affine_transformation`. Notes ----- Currently, prefilter is disabled and therefore the output of `dask_image.ndinterp.affine_transformation` is compared to `prefilter=False`. """ if (interp_order > 1 and interp_mode == 'nearest' and not have_scipy16): # not clear on the underlying cause, but this fails on older SciPy pytest.skip("requires SciPy >= 1.6.0") # define test image a = input_output_shape_per_dim[0] np.random.seed(random_seed) image = np.random.random([a] * n) # transform into dask array chunksize = [input_output_chunksize_per_dim[0]] * n image_da = da.from_array(image, chunks=chunksize) if use_cupy: import cupy as cp image_da = image_da.map_blocks(cp.asarray) if ( prefilter and interp_mode in _supported_prefilter_modes and interp_order > 1 and version.parse(dask.__version__) < version.parse("2020.1.0") ): # older dask will fail if any chunks have size smaller than depth depth = dask_image.ndinterp._get_default_depth(interp_order) in_size = input_output_shape_per_dim[0] in_chunksize = input_output_chunksize_per_dim[0] rem = in_size % in_chunksize if in_size < depth or (rem != 0 and rem < depth): pytest.skip("older dask doesn't automatically rechunk") # define (random) transformation if matrix is None: # make sure to substantially deviate from unity matrix matrix = np.eye(n) + (np.random.random((n, n)) - 0.5) * 5. if offset is None: offset = (np.random.random(n) - 0.5) / 5. * np.array(image.shape) # define resampling options output_shape = [input_output_shape_per_dim[1]] * n output_chunks = [input_output_chunksize_per_dim[1]] * n # transform with scipy image_t_scipy = scipy.ndimage.affine_transform( image, matrix, offset, output_shape=output_shape, order=interp_order, mode=interp_mode, prefilter=prefilter) # transform with dask-image image_t_dask = dask_image.ndinterp.affine_transform( image_da, matrix, offset, output_shape=output_shape, output_chunks=output_chunks, order=interp_order, mode=interp_mode, prefilter=prefilter) image_t_dask_computed = image_t_dask.compute() assert np.allclose(image_t_scipy, image_t_dask_computed) @pytest.mark.parametrize("n", [1, 2, 3]) @pytest.mark.parametrize("input_output_shape_per_dim", [(25, 25)]) @pytest.mark.parametrize("interp_order", range(6)) @pytest.mark.parametrize("input_output_chunksize_per_dim", [(16, 16), (16, 7), (7, 16)]) @pytest.mark.parametrize("random_seed", [0, 2]) def test_affine_transform_general(n, input_output_shape_per_dim, interp_order, input_output_chunksize_per_dim, random_seed): kwargs = dict() kwargs['n'] = n kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim kwargs['interp_order'] = interp_order kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim kwargs['random_seed'] = random_seed validate_affine_transform(**kwargs) @pytest.mark.cupy @pytest.mark.parametrize("n", [1, 2, 3]) @pytest.mark.parametrize("input_output_shape_per_dim", [(25, 25), (25, 10)]) @pytest.mark.parametrize("interp_order", [0, 1]) @pytest.mark.parametrize("input_output_chunksize_per_dim", [(16, 16), (16, 7)]) @pytest.mark.parametrize("random_seed", [0]) def test_affine_transform_cupy(n, input_output_shape_per_dim, interp_order, input_output_chunksize_per_dim, random_seed): pytest.importorskip("cupy", minversion="5.0.0") kwargs = dict() kwargs['n'] = n kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim kwargs['interp_order'] = interp_order kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim kwargs['random_seed'] = random_seed kwargs['use_cupy'] = True validate_affine_transform(**kwargs) @pytest.mark.parametrize("n", [1, 2, 3]) @pytest.mark.parametrize("interp_mode", _supported_modes) @pytest.mark.parametrize("interp_order", [0, 3]) @pytest.mark.parametrize("input_output_shape_per_dim", [(20, 30)]) @pytest.mark.parametrize("input_output_chunksize_per_dim", [(15, 10)]) def test_affine_transform_modes(n, interp_mode, interp_order, input_output_shape_per_dim, input_output_chunksize_per_dim, ): kwargs = dict() kwargs['n'] = n kwargs['interp_mode'] = interp_mode kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim kwargs['interp_order'] = interp_order kwargs['prefilter'] = False validate_affine_transform(**kwargs) @pytest.mark.parametrize("interp_mode", _unsupported_modes) def test_affine_transform_unsupported_modes(interp_mode): with pytest.raises(NotImplementedError): validate_affine_transform(interp_mode=interp_mode) @pytest.mark.parametrize("n", [1, 2, 3]) @pytest.mark.parametrize("interp_order", range(6)) @pytest.mark.parametrize("interp_mode", _supported_prefilter_modes) def test_affine_transform_prefilter_modes(n, interp_order, interp_mode): validate_affine_transform( n=n, input_output_shape_per_dim=(32, 32), input_output_chunksize_per_dim=(24, 24), interp_order=interp_order, interp_mode=interp_mode, prefilter=True, ) @pytest.mark.parametrize("n", [1, 2, 3]) @pytest.mark.parametrize("interp_order", range(2, 6)) @pytest.mark.parametrize("interp_mode", _unsupported_prefilter_modes) def test_affine_transform_prefilter_not_implemented( n, interp_order, interp_mode ): with pytest.raises(NotImplementedError): validate_affine_transform( n=n, interp_order=interp_order, interp_mode=interp_mode, prefilter=True, ) def test_affine_transform_numpy_input(): image = np.ones((3, 3)) image_t = dask_image.ndinterp.affine_transform(image, np.eye(2), [0, 0]) assert image_t.shape == image.shape assert (image == image_t).min() def test_affine_transform_minimal_input(): image = np.ones((3, 3)) image_t = dask_image.ndinterp.affine_transform(np.ones((3, 3)), np.eye(2)) assert image_t.shape == image.shape def test_affine_transform_type_consistency(): image = da.ones((3, 3)) image_t = dask_image.ndinterp.affine_transform(image, np.eye(2), [0, 0]) assert isinstance(image, type(image_t)) assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute())) @pytest.mark.cupy def test_affine_transform_type_consistency_gpu(): cupy = pytest.importorskip("cupy", minversion="5.0.0") image = da.ones((3, 3)) image_t = dask_image.ndinterp.affine_transform(image, np.eye(2), [0, 0]) image.map_blocks(cupy.asarray) assert isinstance(image, type(image_t)) assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute())) def test_affine_transform_no_output_shape_or_chunks_specified(): image = da.ones((3, 3)) image_t = dask_image.ndinterp.affine_transform(image, np.eye(2), [0, 0]) assert image_t.shape == image.shape assert image_t.chunks == tuple([(s,) for s in image.shape]) @pytest.mark.timeout(15) def test_affine_transform_large_input_small_output_cpu(): """ Make sure input array does not need to be computed entirely """ # fully computed, this array would occupy 8TB image = da.random.random([10000] * 3, chunks=(200, 200, 200)) image_t = dask_image.ndinterp.affine_transform(image, np.eye(3), [0, 0, 0], output_chunks=[1, 1, 1], output_shape=[1, 1, 1]) # if more than the needed chunks should be computed, # this would take long and eventually raise a MemoryError image_t[0, 0, 0].compute() @pytest.mark.cupy @pytest.mark.timeout(15) def test_affine_transform_large_input_small_output_gpu(): """ Make sure input array does not need to be computed entirely """ cupy = pytest.importorskip("cupy", minversion="5.0.0") # this array would occupy more than 24GB on a GPU image = da.random.random([2000] * 3, chunks=(50, 50, 50)) image.map_blocks(cupy.asarray) image_t = dask_image.ndinterp.affine_transform(image, np.eye(3), [0, 0, 0], output_chunks=[1, 1, 1], output_shape=[1, 1, 1]) # if more than the needed chunks should be computed, # this would take long and eventually raise a MemoryError image_t[0, 0, 0].compute() @pytest.mark.filterwarnings("ignore:The behavior of affine_transform " "with a 1-D array supplied for the matrix " "parameter has changed") @pytest.mark.parametrize("n", [1, 2, 3, 4]) def test_affine_transform_parameter_formats(n): # define reference parameters scale_factors = np.ones(n, dtype=float) * 2. matrix_n = np.diag(scale_factors) offset = -np.ones(n) # convert into different formats matrix_only_scaling = scale_factors matrix_pre_homogeneous = np.hstack((matrix_n, offset[:, None])) matrix_homogeneous = np.vstack((matrix_pre_homogeneous, [0] * n + [1])) np.random.seed(0) image = da.random.random([5] * n) # reference run image_t_0 = dask_image.ndinterp.affine_transform(image, matrix_n, offset).compute() # assert that the different parameter formats # lead to the same output image_t_scale = dask_image.ndinterp.affine_transform(image, matrix_only_scaling, offset).compute() assert np.allclose(image_t_0, image_t_scale) for matrix in [matrix_pre_homogeneous, matrix_homogeneous]: image_t = dask_image.ndinterp.affine_transform(image, matrix, offset + 10., # ignored ).compute() assert np.allclose(image_t_0, image_t) # catch matrices that are not homogeneous transformation matrices with pytest.raises(ValueError): matrix_not_homogeneous = np.vstack((matrix_pre_homogeneous, [-1] * n + [1])) dask_image.ndinterp.affine_transform(image, matrix_not_homogeneous, offset) ================================================ FILE: tests/test_dask_image/test_ndinterp/test_map_coordinates.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- from packaging import version import dask.array as da import numpy as np import pytest import scipy import scipy.ndimage import dask_image.ndinterp # mode lists for the case with prefilter = False _supported_modes = ['constant', 'nearest'] _unsupported_modes = ['wrap', 'reflect', 'mirror'] # mode lists for the case with prefilter = True _supported_prefilter_modes = ['constant'] _unsupported_prefilter_modes = _unsupported_modes + ['nearest'] have_scipy16 = version.parse(scipy.__version__) >= version.parse('1.6.0') # additional modes are present in SciPy >= 1.6.0 if have_scipy16: _supported_modes += ['grid-constant'] _unsupported_modes += ['grid-mirror', 'grid-wrap'] _unsupported_prefilter_modes += ['grid-constant', 'grid-mirror', 'grid-wrap'] def validate_map_coordinates_general(n=2, interp_order=1, interp_mode='constant', coord_len=12, coord_chunksize=6, coord_offset=0., im_shape_per_dim=12, im_chunksize_per_dim=6, random_seed=0, prefilter=False, ): if interp_order > 1 and interp_mode == 'nearest' and not have_scipy16: # not clear on the underlying cause, but this fails on older SciPy pytest.skip("requires SciPy >= 1.6.0") # define test input np.random.seed(random_seed) input = np.random.random([im_shape_per_dim] * n) input_da = da.from_array(input, chunks=im_chunksize_per_dim) # define test coordinates coords = np.random.random((n, coord_len)) * im_shape_per_dim + coord_offset coords_da = da.from_array(coords, chunks=(n, coord_chunksize)) # ndimage result mapped_scipy = scipy.ndimage.map_coordinates( input, coords, order=interp_order, mode=interp_mode, cval=0.0, prefilter=prefilter) # dask-image results for input_array in [input, input_da]: for coords_array in [coords, coords_da]: mapped_dask = dask_image.ndinterp.map_coordinates( input_array, coords_array, order=interp_order, mode=interp_mode, cval=0.0, prefilter=prefilter) mapped_dask_computed = mapped_dask.compute() assert np.allclose(mapped_scipy, mapped_dask_computed) @pytest.mark.parametrize("n", [1, 2, 3, 4]) @pytest.mark.parametrize("random_seed", range(2)) def test_map_coordinates_basic(n, random_seed, ): kwargs = dict() kwargs['n'] = n kwargs['random_seed'] = random_seed validate_map_coordinates_general(**kwargs) @pytest.mark.timeout(3) def test_map_coordinates_large_input(): """ This test assesses whether relatively large inputs are processed before timeout. """ # define large test image image_da = da.random.random([1000] * 3, chunks=200) # define sparse test coordinates coords = np.random.random((3, 2)) * 1000 # dask-image result dask_image.ndinterp.map_coordinates( image_da, coords).compute() @pytest.mark.parametrize("interp_mode", _supported_modes) def test_map_coordinates_out_of_bounds(interp_mode): """ This test checks that an error is raised when out-of-bounds coordinates are used. """ kwargs = dict() kwargs['random_seed'] = 0 kwargs['interp_mode'] = interp_mode kwargs['im_shape_per_dim'] = 10 kwargs['coord_offset'] = 10 # coordinates will be out of bounds validate_map_coordinates_general(**kwargs) ================================================ FILE: tests/test_dask_image/test_ndinterp/test_rotate.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import numpy as np import dask.array as da import pytest from scipy import ndimage import dask_image.ndinterp as da_ndinterp def validate_rotate(n=2, axes=(0, 1), reshape=False, input_shape_per_dim=16, interp_order=1, interp_mode='constant', input_output_chunksize_per_dim=(6, 6), random_seed=0, use_cupy=False, ): """ Compare the outputs of `ndimage.rotate` and `dask_image.ndinterp.rotate`. """ # define test image np.random.seed(random_seed) image = np.random.random([input_shape_per_dim] * n) angle = np.random.random() * 360 - 180 # transform into dask array chunksize = [input_output_chunksize_per_dim[0]] * n image_da = da.from_array(image, chunks=chunksize) if use_cupy: import cupy as cp image_da = image_da.map_blocks(cp.asarray) # define resampling options output_chunks = [input_output_chunksize_per_dim[1]] * n # transform with dask-image image_t_dask = da_ndinterp.rotate( image, angle, axes=axes, reshape=reshape, order=interp_order, mode=interp_mode, prefilter=False, output_chunks=output_chunks ) image_t_dask_computed = image_t_dask.compute() # transform with scipy image_t_scipy = ndimage.rotate( image, angle, axes=axes, reshape=reshape, order=interp_order, mode=interp_mode, prefilter=False) assert np.allclose(image_t_scipy, image_t_dask_computed) @pytest.mark.parametrize("n", [2, 3]) @pytest.mark.parametrize("input_shape_per_dim", [25, 2]) @pytest.mark.parametrize("interp_order", [0, 1]) @pytest.mark.parametrize("input_output_chunksize_per_dim", [(16, 16), (16, 7), (7, 16)]) @pytest.mark.parametrize("random_seed", [0, 1, 2]) def test_rotate_general(n, input_shape_per_dim, interp_order, input_output_chunksize_per_dim, random_seed): kwargs = dict() kwargs['n'] = n kwargs['input_shape_per_dim'] = input_shape_per_dim kwargs['interp_order'] = interp_order kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim kwargs['random_seed'] = random_seed validate_rotate(**kwargs) @pytest.mark.cupy @pytest.mark.parametrize("n", [2, 3]) @pytest.mark.parametrize("input_shape_per_dim", [25, 2]) @pytest.mark.parametrize("interp_order", [0, 1]) @pytest.mark.parametrize("input_output_chunksize_per_dim", [(16, 16), (16, 7)]) @pytest.mark.parametrize("random_seed", [0]) def test_rotate_cupy(n, input_shape_per_dim, interp_order, input_output_chunksize_per_dim, random_seed): cupy = pytest.importorskip("cupy", minversion="6.0.0") # noqa: F841 kwargs = dict() kwargs['n'] = n kwargs['input_shape_per_dim'] = input_shape_per_dim kwargs['interp_order'] = interp_order kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim kwargs['random_seed'] = random_seed kwargs['use_cupy'] = True validate_rotate(**kwargs) @pytest.mark.parametrize("n", [2, 3]) @pytest.mark.parametrize("interp_mode", ['constant', 'nearest']) @pytest.mark.parametrize("input_shape_per_dim", [20, 30]) @pytest.mark.parametrize("input_output_chunksize_per_dim", [(15, 10)]) def test_rotate_modes(n, interp_mode, input_shape_per_dim, input_output_chunksize_per_dim, ): kwargs = dict() kwargs['n'] = n kwargs['interp_mode'] = interp_mode kwargs['input_shape_per_dim'] = input_shape_per_dim kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim kwargs['interp_order'] = 0 validate_rotate(**kwargs) @pytest.mark.parametrize("interp_mode", ['wrap', 'reflect', 'mirror']) def test_rotate_unsupported_modes(interp_mode): kwargs = dict() kwargs['interp_mode'] = interp_mode with pytest.raises(NotImplementedError): validate_rotate(**kwargs) def test_rotate_dimensions(): with pytest.raises(ValueError): validate_rotate(n=1) @pytest.mark.parametrize("axes", [[1], [1, 2, 3], [-3, 0], [0, -3], [0, 3], [2, 0]]) def test_rotate_axisdimensions(axes): kwargs = dict() kwargs['axes'] = axes with pytest.raises(ValueError): validate_rotate(**kwargs) @pytest.mark.parametrize( "axes", [[1, 2.2], [1, 'a'], [[0, 1], 1], [(0, 1), 1], [0, {}]] ) def test_rotate_axistypes(axes): kwargs = dict() kwargs['axes'] = axes with pytest.raises((ValueError, TypeError)): validate_rotate(**kwargs) @pytest.mark.parametrize( "image", [ np.ones((3, 3)).astype(float), np.ones((3, 3)).astype(int), np.ones((3, 3)).astype(complex), ] ) def test_rotate_dtype(image): image_t = da_ndinterp.rotate(image, 0, reshape=False) assert image_t.dtype == image.dtype def test_rotate_numpy_input(): image = np.ones((3, 3)) image_t = da_ndinterp.rotate(image, 0, reshape=False) assert image_t.shape == image.shape assert (da.from_array(image) == image_t).min() def test_rotate_minimal_input(): image = np.ones((3, 3)) image_t = da_ndinterp.rotate(np.ones((3, 3)), 0) assert image_t.shape == image.shape def test_rotate_type_consistency(): image = da.ones((3, 3)) image_t = da_ndinterp.rotate(image, 0) assert isinstance(image, type(image_t)) assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute())) @pytest.mark.cupy def test_rotate_type_consistency_gpu(): cupy = pytest.importorskip("cupy", minversion="6.0.0") image = da.ones((3, 3)) image_t = da_ndinterp.rotate(image, 0) image.map_blocks(cupy.asarray) assert isinstance(image, type(image_t)) assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute())) def test_rotate_no_chunks_specified(): image = da.ones((3, 3)) image_t = da_ndinterp.rotate(image, 0) assert image_t.shape == image.shape assert image_t.chunks == tuple([(s,) for s in image.shape]) def test_rotate_prefilter_not_implemented_error(): with pytest.raises(NotImplementedError): da_ndinterp.rotate( da.ones((15, 15)), 0, order=3, prefilter=True, mode='nearest') ================================================ FILE: tests/test_dask_image/test_ndinterp/test_spline_filter.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- from packaging import version import dask import dask.array as da import numpy as np import pytest import scipy import scipy.ndimage import dask_image.ndinterp # mode lists for the case with prefilter = False _supported_modes = ['constant', 'nearest', 'reflect', 'mirror'] _unsupported_modes = ['wrap'] # additional modes are present in SciPy >= 1.6.0 if version.parse(scipy.__version__) >= version.parse('1.6.0'): _supported_modes += ['grid-constant', 'grid-mirror', 'grid-wrap'] def validate_spline_filter(n=2, axis_size=64, interp_order=3, interp_mode='constant', chunksize=32, output=np.float64, random_seed=0, use_cupy=False, axis=None, input_as_non_dask_array=False, depth=None): """ Compare the outputs of `scipy.ndimage.spline_transform` and `dask_image.ndinterp.spline_transform`. If axis is not None, then `spline_transform1d` is tested instead. """ if ( np.dtype(output) != np.float64 and version.parse(scipy.__version__) < version.parse('1.4.0') ): pytest.skip("bug in output dtype handling in SciPy < 1.4") # define test image np.random.seed(random_seed) image = np.random.random([axis_size] * n) if version.parse(dask.__version__) < version.parse("2020.1.0"): # older dask will fail if any chunks have size smaller than depth _depth = dask_image.ndinterp._get_default_depth(interp_order) rem = axis_size % chunksize if chunksize < _depth or (rem != 0 and rem < _depth): pytest.skip("older dask doesn't automatically rechunk") if input_as_non_dask_array: if use_cupy: import cupy as cp image_da = cp.asarray(image) else: image_da = image else: # transform into dask array image_da = da.from_array(image, chunks=[chunksize] * n) if use_cupy: import cupy as cp image_da = image_da.map_blocks(cp.asarray) if axis is not None: scipy_func = scipy.ndimage.spline_filter1d dask_image_func = dask_image.ndinterp.spline_filter1d kwargs = {'axis': axis} else: scipy_func = scipy.ndimage.spline_filter dask_image_func = dask_image.ndinterp.spline_filter kwargs = {} # transform with scipy image_t_scipy = scipy_func( image, output=output, order=interp_order, mode=interp_mode, **kwargs) # transform with dask-image image_t_dask = dask_image_func( image_da, output=output, order=interp_order, mode=interp_mode, depth=depth, **kwargs) image_t_dask_computed = image_t_dask.compute() rtol = atol = 1e-6 out_dtype = np.dtype(output) assert image_t_scipy.dtype == image_t_dask_computed.dtype == out_dtype assert np.allclose(image_t_scipy, image_t_dask_computed, rtol=rtol, atol=atol) @pytest.mark.parametrize("n", [1, 2, 3]) @pytest.mark.parametrize("axis_size", [64]) @pytest.mark.parametrize("interp_order", range(2, 6)) @pytest.mark.parametrize("interp_mode", _supported_modes) @pytest.mark.parametrize("chunksize", [32, 15]) def test_spline_filter_general( n, axis_size, interp_order, interp_mode, chunksize, ): validate_spline_filter( n=n, axis_size=axis_size, interp_order=interp_order, interp_mode=interp_mode, chunksize=chunksize, axis=None, ) @pytest.mark.cupy @pytest.mark.parametrize("n", [2]) @pytest.mark.parametrize("axis_size", [32]) @pytest.mark.parametrize("interp_order", range(2, 6)) @pytest.mark.parametrize("interp_mode", _supported_modes[::2]) @pytest.mark.parametrize("chunksize", [16]) @pytest.mark.parametrize("axis", [None, -1]) @pytest.mark.parametrize("input_as_non_dask_array", [False, True]) def test_spline_filter_cupy( n, axis_size, interp_order, interp_mode, chunksize, axis, input_as_non_dask_array, ): pytest.importorskip("cupy", minversion="9.0.0") validate_spline_filter( n=n, axis_size=axis_size, interp_order=interp_order, interp_mode=interp_mode, chunksize=chunksize, axis=axis, input_as_non_dask_array=input_as_non_dask_array, use_cupy=True, ) @pytest.mark.parametrize("n", [1, 2, 3]) @pytest.mark.parametrize("axis_size", [48, 27]) @pytest.mark.parametrize("interp_order", range(2, 6)) @pytest.mark.parametrize("interp_mode", _supported_modes) @pytest.mark.parametrize("chunksize", [33]) @pytest.mark.parametrize("axis", [0, 1, -1]) def test_spline_filter1d_general( n, axis_size, interp_order, interp_mode, chunksize, axis, ): if axis == 1 and n < 2: pytest.skip("skip axis=1 for 1d signals") validate_spline_filter( n=n, axis_size=axis_size, interp_order=interp_order, interp_mode=interp_mode, chunksize=chunksize, axis=axis, ) @pytest.mark.parametrize("axis", [None, -1]) def test_spline_filter_non_dask_array_input(axis): validate_spline_filter( axis=axis, input_as_non_dask_array=True, ) @pytest.mark.parametrize("depth", [None, 24]) @pytest.mark.parametrize("axis", [None, -1]) def test_spline_filter_non_default_depth(depth, axis): validate_spline_filter( axis=axis, depth=depth, ) @pytest.mark.parametrize("depth", [(16, 32), [18]]) def test_spline_filter1d_invalid_depth(depth): with pytest.raises(ValueError): validate_spline_filter( axis=-1, depth=depth, ) @pytest.mark.parametrize("axis_size", [32]) @pytest.mark.parametrize("interp_order", range(2, 6)) @pytest.mark.parametrize("interp_mode", _unsupported_modes) @pytest.mark.parametrize("axis", [None, -1]) def test_spline_filter_unsupported_modes( axis_size, interp_order, interp_mode, axis, ): with pytest.raises(NotImplementedError): validate_spline_filter( axis_size=axis_size, interp_order=interp_order, interp_mode=interp_mode, axis=axis, ) @pytest.mark.parametrize( "output", [np.float64, np.float32, "float32", np.dtype(np.float32)] ) @pytest.mark.parametrize("axis", [None, -1]) def test_spline_filter_output_dtype(output, axis): validate_spline_filter( axis_size=32, interp_order=3, output=output, axis=axis, ) @pytest.mark.parametrize("axis", [None, -1]) def test_spline_filter_array_output_unsupported(axis): n = 2 axis_size = 32 shape = (axis_size,) * n with pytest.raises(TypeError): validate_spline_filter( n=n, axis_size=axis_size, interp_order=3, output=np.empty(shape), axis=axis, ) ================================================ FILE: tests/test_dask_image/test_ndmeasure/__init__.py ================================================ # -*- coding: utf-8 -*- ================================================ FILE: tests/test_dask_image/test_ndmeasure/test__utils.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import dask.array as da import dask_image.ndmeasure._utils def test__norm_input_labels_index_err(): shape = (15, 16) chunks = (4, 5) ind = None a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = (a < 0.5).astype(np.int64) d_lbls = da.from_array(lbls, chunks=d.chunks) lbls = lbls[:-1] d_lbls = d_lbls[:-1] with pytest.raises(ValueError): dask_image.ndmeasure._utils._norm_input_labels_index(d, d_lbls, ind) def test__norm_input_labels_index(): shape = (15, 16) chunks = (4, 5) ind = None a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = (a < 0.5).astype(int) d_lbls = da.from_array(lbls, chunks=d.chunks) d_n, d_lbls_n, ind_n = dask_image.ndmeasure._utils._norm_input_labels_index( # noqa: E501 d, d_lbls, ind ) assert isinstance(d_n, da.Array) assert isinstance(d_lbls_n, da.Array) assert isinstance(ind_n, da.Array) assert d_n.shape == d.shape assert d_lbls_n.shape == d_lbls.shape assert ind_n.shape == () da.utils.assert_eq(d_n, d) da.utils.assert_eq(d_lbls_n, d_lbls) da.utils.assert_eq(ind_n, np.array(1, dtype=int)) @pytest.mark.parametrize( "shape, chunks, ind", [ ((15, 16), (4, 5), [[1, 2, 3, 4]]), ((15, 16), (4, 5), [[1, 2], [3, 4]]), ((15, 16), (4, 5), [[[1], [2], [3], [4]]]), ] ) def test__norm_input_labels_index_warn(shape, chunks, ind): a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = np.zeros(a.shape, dtype=np.int64) lbls += ( (a < 0.5).astype(lbls.dtype) + (a < 0.25).astype(lbls.dtype) + (a < 0.125).astype(lbls.dtype) + (a < 0.0625).astype(lbls.dtype) ) d_lbls = da.from_array(lbls, chunks=d.chunks) ind = np.array(ind) d_ind = da.from_array(ind, chunks=1) with pytest.warns(FutureWarning) as w: dask_image.ndmeasure._utils._norm_input_labels_index( d, d_lbls, d_ind ) if ind.ndim > 1: assert len(w) == 1 w.pop(FutureWarning) else: assert len(w) == 0 @pytest.mark.parametrize( "shape, chunks", [ ((15,), (4,)), ((15, 16), (4, 5)), ((15, 1, 16), (4, 1, 5)), ((15, 12, 16), (4, 5, 6)), ] ) def test___ravel_shape_indices(shape, chunks): a = np.arange(int(np.prod(shape)), dtype=np.int64).reshape(shape) d = dask_image.ndmeasure._utils._ravel_shape_indices( shape, dtype=np.int64, chunks=chunks ) da.utils.assert_eq(d, a) ================================================ FILE: tests/test_dask_image/test_ndmeasure/test_core.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import itertools as it import warnings as wrn import pytest import numpy as np import scipy import scipy.ndimage import dask.array as da import dask_image.ndmeasure @pytest.mark.parametrize( "funcname", [ "center_of_mass", "extrema", "maximum", "maximum_position", "mean", "median", "minimum", "minimum_position", "standard_deviation", "sum_labels", "variance", ] ) def test_measure_props_err(funcname): da_func = getattr(dask_image.ndmeasure, funcname) shape = (15, 16) chunks = (4, 5) ind = None a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = (a < 0.5).astype(np.int64) d_lbls = da.from_array(lbls, chunks=d.chunks) lbls = lbls[:-1] d_lbls = d_lbls[:-1] with pytest.raises(ValueError): da_func(d, lbls, ind) @pytest.mark.parametrize( "datatype", [ int, float, np.bool_, np.uint8, np.uint16, np.uint32, np.uint64, np.int16, np.int32, np.int64, np.float32, np.float64, ] ) def test_center_of_mass(datatype): a = np.array([[1, 1], [0, 0]]).astype(datatype) d = da.from_array(a, chunks=(1, 2)) actual = dask_image.ndmeasure.center_of_mass(d).compute() expected = [0., 0.5] assert np.allclose(actual, expected) @pytest.mark.parametrize( "funcname", [ "center_of_mass", "maximum", "maximum_position", "mean", "median", "minimum", "minimum_position", "standard_deviation", "sum_labels", "variance", ] ) @pytest.mark.parametrize( "shape, chunks, has_lbls, ind", [ ((5, 6, 4), (2, 3, 2), False, None), ((15, 16), (4, 5), False, None), ((15, 16), (4, 5), True, None), ((15, 16), (4, 5), True, 0), ((15, 16), (4, 5), True, 1), ((15, 16), (4, 5), True, [1]), ((15, 16), (4, 5), True, [1, 2]), ((5, 6, 4), (2, 3, 2), True, [1, 2]), ((15, 16), (4, 5), True, [1, 100]), ((5, 6, 4), (2, 3, 2), True, [1, 100]), ((15, 16), (4, 5), True, [[1, 2, 3, 4]]), ((15, 16), (4, 5), True, [[1, 2], [3, 4]]), ((15, 16), (4, 5), True, [[[1], [2], [3], [4]]]), ] ) def test_measure_props(funcname, shape, chunks, has_lbls, ind): sp_func = getattr(scipy.ndimage, funcname) da_func = getattr(dask_image.ndmeasure, funcname) a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = None d_lbls = None if has_lbls: lbls = np.zeros(a.shape, dtype=np.int64) lbls += ( (a < 0.5).astype(lbls.dtype) + (a < 0.25).astype(lbls.dtype) + (a < 0.125).astype(lbls.dtype) + (a < 0.0625).astype(lbls.dtype) ) d_lbls = da.from_array(lbls, chunks=d.chunks) a_r = np.array(sp_func(a, lbls, ind)) d_r = da_func(d, d_lbls, ind) if a_r.dtype != d_r.dtype: wrn.warn( "Encountered a type mismatch." " Expected type, %s, but got type, %s." "" % (str(a_r.dtype), str(d_r.dtype)), RuntimeWarning ) assert a_r.shape == d_r.shape # See the linked issue for details. # ref: https://github.com/scipy/scipy/issues/7706 if ( funcname == "median" and ind is not None and not np.isin(np.atleast_1d(ind), lbls).all() ): pytest.skip("SciPy's `median` mishandles missing labels.") assert np.allclose(np.array(a_r), np.array(d_r), equal_nan=True) @pytest.mark.parametrize( "shape, chunks, has_lbls, ind", [ ((15, 16), (4, 5), False, None), ((5, 6, 4), (2, 3, 2), False, None), ((15, 16), (4, 5), True, None), ((15, 16), (4, 5), True, 0), ((15, 16), (4, 5), True, 1), ((15, 16), (4, 5), True, [1]), ((15, 16), (4, 5), True, [1, 2]), ((5, 6, 4), (2, 3, 2), True, [1, 2]), ((15, 16), (4, 5), True, [1, 100]), ((5, 6, 4), (2, 3, 2), True, [1, 100]), ((15, 16), (4, 5), True, [[1, 2, 3, 4]]), ((15, 16), (4, 5), True, [[1, 2], [3, 4]]), ((15, 16), (4, 5), True, [[[1], [2], [3], [4]]]), ] ) def test_area(shape, chunks, has_lbls, ind): a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = None d_lbls = None if has_lbls: lbls = np.zeros(a.shape, dtype=np.int64) lbls += ( (a < 0.5).astype(lbls.dtype) + (a < 0.25).astype(lbls.dtype) + (a < 0.125).astype(lbls.dtype) + (a < 0.0625).astype(lbls.dtype) ) d_lbls = da.from_array(lbls, chunks=d.chunks) a_r = None if has_lbls: if ind is None: a_r = lbls.astype(bool).astype(np.int64).sum() else: a_r = np.bincount( lbls.flatten(), minlength=(1 + max(np.array(ind).flatten())) ) a_r = a_r[np.asarray(ind)] else: a_r = np.array(a.size)[()] d_r = dask_image.ndmeasure.area(d, d_lbls, ind) assert np.allclose(np.array(a_r), np.array(d_r), equal_nan=True) @pytest.mark.parametrize( "shape, chunks, has_lbls, ind", [ ((15, 16), (4, 5), False, None), ((5, 6, 4), (2, 3, 2), False, None), ((15, 16), (4, 5), True, None), ((15, 16), (4, 5), True, 0), ((15, 16), (4, 5), True, 1), ((15, 16), (4, 5), True, [1]), ((15, 16), (4, 5), True, [1, 2]), ((5, 6, 4), (2, 3, 2), True, [1, 2]), ((15, 16), (4, 5), True, [1, 100]), ((5, 6, 4), (2, 3, 2), True, [1, 100]), ((15, 16), (4, 5), True, [[1, 2, 3, 4]]), ((15, 16), (4, 5), True, [[1, 2], [3, 4]]), ((15, 16), (4, 5), True, [[[1], [2], [3], [4]]]), ] ) def test_extrema(shape, chunks, has_lbls, ind): a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = None d_lbls = None if has_lbls: lbls = np.zeros(a.shape, dtype=np.int64) lbls += ( (a < 0.5).astype(lbls.dtype) + (a < 0.25).astype(lbls.dtype) + (a < 0.125).astype(lbls.dtype) + (a < 0.0625).astype(lbls.dtype) ) d_lbls = da.from_array(lbls, chunks=d.chunks) a_r = scipy.ndimage.extrema(a, lbls, ind) d_r = dask_image.ndmeasure.extrema(d, d_lbls, ind) assert len(a_r) == len(d_r) for i in range(len(a_r)): a_r_i = np.array(a_r[i]) if a_r_i.dtype != d_r[i].dtype: wrn.warn( "Encountered a type mismatch." " Expected type, %s, but got type, %s." "" % (str(a_r_i.dtype), str(d_r[i].dtype)), RuntimeWarning ) assert a_r_i.shape == d_r[i].shape assert np.allclose(a_r_i, np.array(d_r[i]), equal_nan=True) @pytest.mark.parametrize( "shape, chunks, has_lbls, ind", [ ((15, 16), (4, 5), False, None), ((5, 6, 4), (2, 3, 2), False, None), ((15, 16), (4, 5), True, None), ((15, 16), (4, 5), True, 0), ((15, 16), (4, 5), True, 1), ((15, 16), (4, 5), True, 100), ((15, 16), (4, 5), True, [1]), ((15, 16), (4, 5), True, [1, 2]), ((5, 6, 4), (2, 3, 2), True, [1, 2]), ((15, 16), (4, 5), True, [1, 100]), ((5, 6, 4), (2, 3, 2), True, [1, 100]), ] ) @pytest.mark.parametrize( "min, max, bins", [ (0, 1, 5), ] ) def test_histogram(shape, chunks, has_lbls, ind, min, max, bins): a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = None d_lbls = None if has_lbls: lbls = np.zeros(a.shape, dtype=np.int64) lbls += ( (a < 0.5).astype(lbls.dtype) + (a < 0.25).astype(lbls.dtype) + (a < 0.125).astype(lbls.dtype) + (a < 0.0625).astype(lbls.dtype) ) d_lbls = da.from_array(lbls, chunks=d.chunks) a_r = scipy.ndimage.histogram(a, min, max, bins, lbls, ind) d_r = dask_image.ndmeasure.histogram(d, min, max, bins, d_lbls, ind) if ind is None or np.isscalar(ind): if a_r is None: assert d_r.compute() is None else: np.allclose(a_r, d_r.compute(), equal_nan=True) else: assert a_r.dtype == d_r.dtype assert a_r.shape == d_r.shape for i in it.product(*[range(_) for _ in a_r.shape]): if a_r[i] is None: assert d_r[i].compute() is None else: assert np.allclose(a_r[i], d_r[i].compute(), equal_nan=True) def _assert_equivalent_labeling(labels0, labels1): """Make sure the two label arrays are equivalent. In the sense that if two pixels have the same label in labels0, they will also have the same label in labels1, and vice-versa. We check this by verifying that there is exactly a one-to-one mapping between the two label volumes. """ matching = np.stack((labels0.ravel(), labels1.ravel()), axis=1) unique_matching = dask_image.ndmeasure._label._unique_axis(matching) bincount0 = np.bincount(unique_matching[:, 0]) bincount1 = np.bincount(unique_matching[:, 1]) assert np.all(bincount0 == 1) assert np.all(bincount1 == 1) @pytest.mark.parametrize( "seed, prob, shape, chunks, connectivity", [ (42, 0.4, (15, 16), (15, 16), 1), (42, 0.4, (15, 16), (4, 5), 1), (42, 0.4, (15, 16), (4, 5), 2), (42, 0.4, (15, 16), (4, 5), None), (42, 0.4, (15, 16), (8, 5), 1), (42, 0.4, (15, 16), (8, 5), 2), (42, 0.3, (10, 8, 6), (5, 4, 3), 1), (42, 0.3, (10, 8, 6), (5, 4, 3), 2), (42, 0.3, (10, 8, 6), (5, 4, 3), 3), ] ) def test_label(seed, prob, shape, chunks, connectivity): np.random.seed(seed) a = np.random.random(shape) < prob d = da.from_array(a, chunks=chunks) if connectivity is None: s = None else: s = scipy.ndimage.generate_binary_structure(a.ndim, connectivity) a_l, a_nl = scipy.ndimage.label(a, s) d_l, d_nl = dask_image.ndmeasure.label(d, s) assert a_nl == d_nl.compute() assert a_l.dtype == d_l.dtype assert a_l.shape == d_l.shape _assert_equivalent_labeling(a_l, d_l.compute()) a = np.array( [ [0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 0, 0, 0, 1, 0, 1, 1, 1, 0], [0, 1, 0, 0, 1, 0, 1, 1, 1, 0], [0, 0, 1, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1, 1, 0, 0, 0], ] ) @pytest.mark.parametrize( "a, a_res, wrap_axes, connectivity, chunks", [ pytest.param( a, np.array( [ [0, 0, 1, 0, 0, 3, 3, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 0, 0, 0, 2, 0, 1, 1, 1, 0], [0, 1, 0, 0, 2, 0, 1, 1, 1, 0], [0, 0, 1, 0, 2, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 4, 0, 0, 5, 5, 0, 0, 0], ] ), (1,), 2, (5, 5), id="2d, wrapping 1st axis.", ), pytest.param( a, np.array( [ [0, 0, 1, 0, 0, 3, 3, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 4, 4, 4, 4], [1, 1, 0, 0, 0, 0, 4, 4, 4, 4], [1, 0, 0, 0, 2, 0, 4, 4, 4, 0], [0, 1, 0, 0, 2, 0, 4, 4, 4, 0], [0, 0, 1, 0, 2, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 3, 3, 0, 0, 0], ] ), (0,), 2, (5, 5), id="2d, wrapping 0th axes.", ), pytest.param( a, np.array( [ [0, 0, 1, 0, 0, 3, 3, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 0, 0, 0, 2, 0, 1, 1, 1, 0], [0, 1, 0, 0, 2, 0, 1, 1, 1, 0], [0, 0, 1, 0, 2, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 3, 3, 0, 0, 0], ] ), (0, 1), 2, (5, 5), id="2d, wrapping both axes", ), pytest.param( np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]), np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]), (0, 1), 2, "auto", id="2d, full wrap, high connectivity (corners).", ), pytest.param( np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]), # Corners should not be connected for lower connectivity. np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 2]]), (0, 1), 1, "auto", id="2d, full wrap, low connectivity (no corners).", ), # 3d pytest.param( np.array( [ [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]], ] ), np.array( [ [[0, 0, 0, 0, 0], [1, 0, 0, 0, 2], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [3, 0, 0, 0, 4], [3, 0, 0, 0, 4]], ] ), None, 3, "auto", id="3d no wrap", ), pytest.param( np.array( [ [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]], ] ), np.array( [ [[0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [2, 0, 0, 0, 2], [2, 0, 0, 0, 2]], ] ), (2,), 3, "auto", id="3d wrap 2nd axis", ), pytest.param( np.array( [ [ [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], ], [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], ], [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1], ], ] ), np.array( [ [ [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], ], [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [2, 0, 0, 0, 2], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], ], [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [3, 0, 0, 0, 3], ], ] ), (1, 2), 3, "auto", id="3d, wrap 1st and 2nd axis, with corners", ), pytest.param( np.array( [ [ [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], ], [ [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], ], [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1], ], ] ), np.array( [ [ [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], ], [ [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [2, 0, 0, 0, 2], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], ], [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1], ], ] ), (1, 2), 3, "auto", id="3d, with corners, connection through adjacent timesteps.", ), ], ) def test_label_wrap(a, a_res, wrap_axes, connectivity, chunks): d = da.from_array(a, chunks=chunks) s = scipy.ndimage.generate_binary_structure(a.ndim, connectivity) d_l, _ = dask_image.ndmeasure.label(d, s, wrap_axes=wrap_axes) _assert_equivalent_labeling(a_res, d_l.compute()) @pytest.mark.parametrize( "ndim", (2, 3, 4, 5) ) def test_label_full_struct_element(ndim): full_s = scipy.ndimage.generate_binary_structure(ndim, ndim) orth_s = scipy.ndimage.generate_binary_structure(ndim, ndim - 1) # create a mask that represents a single connected component # under the full (highest rank) structuring element # but several connected components under the orthogonal # structuring element mask = full_s ^ orth_s mask[tuple([1] * ndim)] = True # create dask array with chunk boundary # that passes through the mask mask_da = da.from_array(mask, chunks=[2] * ndim) labels_ndi, N_ndi = scipy.ndimage.label(mask, structure=full_s) labels_di_da, N_di_da = dask_image.ndmeasure.label( mask_da, structure=full_s) assert N_ndi == N_di_da.compute() _assert_equivalent_labeling( labels_ndi, labels_di_da.compute()) @pytest.mark.parametrize( "shape, chunks, ind", [ ((15, 16), (4, 5), None), ((5, 6, 4), (2, 3, 2), None), ((15, 16), (4, 5), 0), ((15, 16), (4, 5), 1), ((15, 16), (4, 5), [1]), ((15, 16), (4, 5), [1, 2]), ((5, 6, 4), (2, 3, 2), [1, 2]), ((15, 16), (4, 5), [1, 100]), ((5, 6, 4), (2, 3, 2), [1, 100]), ] ) @pytest.mark.parametrize( "default", [ None, 0, 1.5, ] ) @pytest.mark.parametrize( "pass_positions", [ False, True, ] ) def test_labeled_comprehension(shape, chunks, ind, default, pass_positions): a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = np.zeros(a.shape, dtype=np.int64) lbls += ( (a < 0.5).astype(lbls.dtype) + (a < 0.25).astype(lbls.dtype) + (a < 0.125).astype(lbls.dtype) + (a < 0.0625).astype(lbls.dtype) ) d_lbls = da.from_array(lbls, chunks=d.chunks) def func(val, pos=None): if pos is None: pos = 0 * val + 1 return (val * pos).sum() / (1 + val.max() * pos.max()) a_cm = scipy.ndimage.labeled_comprehension( a, lbls, ind, func, np.float64, default, pass_positions ) d_cm = dask_image.ndmeasure.labeled_comprehension( d, d_lbls, ind, func, np.float64, default, pass_positions ) assert a_cm.dtype == d_cm.dtype assert a_cm.shape == d_cm.shape assert np.allclose(np.array(a_cm), np.array(d_cm), equal_nan=True) @pytest.mark.parametrize( "shape, chunks, ind", [ ((15, 16), (4, 5), None), ((5, 6, 4), (2, 3, 2), None), ((15, 16), (4, 5), 0), ((15, 16), (4, 5), 1), ((15, 16), (4, 5), [1]), ((15, 16), (4, 5), [1, 2]), ((5, 6, 4), (2, 3, 2), [1, 2]), ((15, 16), (4, 5), [1, 100]), ((5, 6, 4), (2, 3, 2), [1, 100]), ] ) def test_labeled_comprehension_struct(shape, chunks, ind): a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = np.zeros(a.shape, dtype=np.int64) lbls += ( (a < 0.5).astype(lbls.dtype) + (a < 0.25).astype(lbls.dtype) + (a < 0.125).astype(lbls.dtype) + (a < 0.0625).astype(lbls.dtype) ) d_lbls = da.from_array(lbls, chunks=d.chunks) dtype = np.dtype([("val", np.float64), ("pos", int)]) default = np.array((np.nan, -1), dtype=dtype) def func_max(val): return np.max(val) def func_argmax(val, pos): return pos[np.argmax(val)] def func_max_argmax(val, pos): result = np.empty((), dtype=dtype) i = np.argmax(val) result["val"] = val[i] result["pos"] = pos[i] return result[()] a_max = scipy.ndimage.labeled_comprehension( a, lbls, ind, func_max, dtype["val"], default["val"], False ) a_argmax = scipy.ndimage.labeled_comprehension( a, lbls, ind, func_argmax, dtype["pos"], default["pos"], True ) d_max_argmax = dask_image.ndmeasure.labeled_comprehension( d, d_lbls, ind, func_max_argmax, dtype, default, True ) d_max = d_max_argmax["val"] d_argmax = d_max_argmax["pos"] assert dtype == d_max_argmax.dtype for e_a_r, e_d_r in zip([a_max, a_argmax], [d_max, d_argmax]): assert e_a_r.dtype == e_d_r.dtype assert e_a_r.shape == e_d_r.shape assert np.allclose(np.array(e_a_r), np.array(e_d_r), equal_nan=True) @pytest.mark.parametrize( "shape, chunks, ind", [ ((15, 16), (4, 5), None), ((5, 6, 4), (2, 3, 2), None), ((15, 16), (4, 5), 0), ((15, 16), (4, 5), 1), ((15, 16), (4, 5), [1]), ((15, 16), (4, 5), [1, 2]), ((5, 6, 4), (2, 3, 2), [1, 2]), ((15, 16), (4, 5), [1, 100]), ((5, 6, 4), (2, 3, 2), [1, 100]), ] ) def test_labeled_comprehension_object(shape, chunks, ind): a = np.random.random(shape) d = da.from_array(a, chunks=chunks) lbls = np.zeros(a.shape, dtype=np.int64) lbls += ( (a < 0.5).astype(lbls.dtype) + (a < 0.25).astype(lbls.dtype) + (a < 0.125).astype(lbls.dtype) + (a < 0.0625).astype(lbls.dtype) ) d_lbls = da.from_array(lbls, chunks=d.chunks) dtype = np.dtype(object) default = None def func_min_max(val): return np.array([np.min(val), np.max(val)]) a_r = scipy.ndimage.labeled_comprehension( a, lbls, ind, func_min_max, dtype, default, False ) d_r = dask_image.ndmeasure.labeled_comprehension( d, d_lbls, ind, func_min_max, dtype, default, False ) if ind is None or np.isscalar(ind): if a_r is None: assert d_r.compute() is None else: np.allclose(a_r, d_r.compute(), equal_nan=True) else: assert a_r.dtype == d_r.dtype assert a_r.shape == d_r.shape for i in it.product(*[range(_) for _ in a_r.shape]): if a_r[i] is None: assert d_r[i].compute() is None else: assert np.allclose(a_r[i], d_r[i].compute(), equal_nan=True) ================================================ FILE: tests/test_dask_image/test_ndmeasure/test_find_objects.py ================================================ import pytest pd = pytest.importorskip("pandas") dd = pytest.importorskip("dask.dataframe") import dask.array as da # noqa: E402 import numpy as np # noqa: E402 import dask_image.ndmeasure # noqa: E402 @pytest.fixture def label_image(): """Return small label image for tests. dask.array array([[ 0, 0, 0, 0, 0, 0, 0, 333, 333, 333], [111, 111, 0, 0, 0, 0, 0, 333, 333, 333], [111, 111, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 222, 222, 222, 222, 222, 222, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) """ # noqa: E501 label_image = np.zeros((5, 10)).astype(int) label_image[1:3, 0:2] = 111 label_image[3, 3:-2] = 222 label_image[0:2, -3:] = 333 label_image = da.from_array(label_image, chunks=(5, 5)) return label_image @pytest.fixture def label_image_with_empty_chunk(): """Return small label image with an empty chunk for tests. dask.array array([[ 0, 0, 0, 0, 0, 0], [111, 111, 0, 0, 0, 0], [111, 111, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 222, 222, 222], [ 0, 0, 0, 0, 0, 0]]) """ # noqa: E501 label_image = np.zeros((6, 6)).astype(int) label_image[1:3, 0:2] = 111 label_image[4, 3:] = 222 label_image = da.from_array(label_image, chunks=(3, 3)) return label_image def test_find_objects_err(label_image): label_image = label_image.astype(float) with pytest.raises(ValueError): dask_image.ndmeasure.find_objects(label_image) def test_empty_chunk(): test_labels = da.zeros((10, 10), dtype='int', chunks=(3, 3)) test_labels[0, 0] = 1 computed_result = dask_image.ndmeasure.find_objects(test_labels).compute() expected = pd.DataFrame.from_dict({0: {1: slice(0, 1)}, 1: {1: slice(0, 1)}, }) assert computed_result.equals(expected) def test_find_objects(label_image): result = dask_image.ndmeasure.find_objects(label_image) assert isinstance(result, dd.DataFrame) computed_result = result.compute() assert isinstance(computed_result, pd.DataFrame) expected = pd.DataFrame.from_dict({ 0: {111: slice(1, 3), 222: slice(3, 4), 333: slice(0, 2)}, 1: {111: slice(0, 2), 222: slice(3, 8), 333: slice(7, 10)}, }) assert computed_result.equals(expected) def test_3d_find_objects(label_image): label_image = da.stack([label_image, label_image], axis=2) result = dask_image.ndmeasure.find_objects(label_image) assert isinstance(result, dd.DataFrame) computed_result = result.compute() assert isinstance(computed_result, pd.DataFrame) expected = pd.DataFrame.from_dict({ 0: {111: slice(1, 3), 222: slice(3, 4), 333: slice(0, 2)}, 1: {111: slice(0, 2), 222: slice(3, 8), 333: slice(7, 10)}, 2: {111: slice(0, 2), 222: slice(0, 2), 333: slice(0, 2)}, }) assert computed_result.equals(expected) def test_find_objects_with_empty_chunks(label_image_with_empty_chunk): result = dask_image.ndmeasure.find_objects(label_image_with_empty_chunk) assert isinstance(result, dd.DataFrame) computed_result = result.compute() assert isinstance(computed_result, pd.DataFrame) expected = pd.DataFrame.from_dict({ 0: {111: slice(1, 3, None), 222: slice(4, 5, None)}, 1: {111: slice(0, 2, None), 222: slice(3, 6, None)}, }) assert computed_result.equals(expected) ================================================ FILE: tests/test_dask_image/test_ndmeasure/test_find_objects_no_dataframe.py ================================================ """ Test that ``find_objects`` raises a helpful ``ImportError`` when the optional ``dask[dataframe]`` / ``pandas`` dependencies are not installed. This is skipped if both dependencies are installed. """ import dask.array as da import pytest import dask_image.ndmeasure try: import pandas # noqa: F401 import dask.dataframe # noqa: F401 dataframe_available = True except ImportError: dataframe_available = False @pytest.mark.skipif( dataframe_available, reason="dataframe dependencies are installed; " "ImportError path only triggers without them", ) def test_find_objects_raises_import_error_without_pandas(): label_image = da.zeros((3, 3), dtype=int, chunks=(3, 3)) with pytest.raises( ImportError, match=( r"dask_image\.ndmeasure\.find_objects requires the optional " r"dependencies `dask\[dataframe\]` and `pandas`\. " r"Install them with `pip install dask-image\[dataframe\]`\." ), ): dask_image.ndmeasure.find_objects(label_image) ================================================ FILE: tests/test_dask_image/test_ndmorph/__init__.py ================================================ # -*- coding: utf-8 -*- ================================================ FILE: tests/test_dask_image/test_ndmorph/test__utils.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import dask.array as da from dask_image.ndmorph import _utils @pytest.mark.parametrize( "err_type, input, structure", [ ( RuntimeError, da.ones([1, 2], dtype=bool, chunks=(1, 2,)), da.arange(2, dtype=bool, chunks=(2,)) ), ( TypeError, da.arange(2, dtype=bool, chunks=(2,)), 2.0 ), ] ) def test_errs__get_structure(err_type, input, structure): with pytest.raises(err_type): _utils._get_structure(input, structure) @pytest.mark.parametrize( "err_type, iterations", [ (TypeError, 0.0), (NotImplementedError, 0), ] ) def test_errs__get_iterations(err_type, iterations): with pytest.raises(err_type): _utils._get_iterations(iterations) @pytest.mark.parametrize( "err_type, input, mask", [ ( RuntimeError, da.arange(2, dtype=bool, chunks=(2,)), da.arange(1, dtype=bool, chunks=(2,)) ), ( TypeError, da.arange(2, dtype=bool, chunks=(2,)), 2.0 ), ] ) def test_errs__get_mask(err_type, input, mask): with pytest.raises(err_type): _utils._get_mask(input, mask) @pytest.mark.parametrize( "err_type, border_value", [ (TypeError, 0.0), (TypeError, 1.0), ] ) def test_errs__get_border_value(err_type, border_value): with pytest.raises(err_type): _utils._get_border_value(border_value) @pytest.mark.parametrize( "err_type, brute_force", [ (NotImplementedError, True), (TypeError, 1), ] ) def test_errs__get_brute_force(err_type, brute_force): with pytest.raises(err_type): _utils._get_brute_force(brute_force) @pytest.mark.parametrize( "expected, input, structure", [ ( np.array([1, 1, 1], dtype=bool), (da.arange(10, chunks=(10,)) % 2).astype(bool), None ), ( np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool), (da.arange(100, chunks=10).reshape(10, 10) % 2).astype(bool), # noqa: E501 None ), ( np.array([1, 1, 1], dtype=bool), (da.arange(10, chunks=(10,)) % 2).astype(bool), np.array([1, 1, 1], dtype=int) ), ( np.array([1, 1, 1], dtype=bool), (da.arange(10, chunks=(10,)) % 2).astype(bool), np.array([1, 1, 1], dtype=bool) ), ] ) def test__get_structure(expected, input, structure): result = _utils._get_structure(input, structure) assert expected.dtype.type is result.dtype.type assert np.array((expected == result).all())[()] @pytest.mark.parametrize( "expected, iterations", [ (1, 1), (4, 4), ] ) def test__get_iterations(expected, iterations): assert expected == _utils._get_iterations(iterations) @pytest.mark.parametrize( "expected, a", [ (np.bool_, False), (np.int_, 2), (np.float64, 3.1), (np.complex128, 1 + 2j), (np.int16, np.int16(6)), (np.uint32, np.arange(3, dtype=np.uint32)), ] ) def test__get_dtype(expected, a): assert np.dtype(expected) is _utils._get_dtype(a) @pytest.mark.parametrize( "expected, input, mask", [ (True, da.arange(2, dtype=bool, chunks=(2,)), None), (True, da.arange(2, dtype=bool, chunks=(2,)), True), (False, da.arange(2, dtype=bool, chunks=(2,)), False), ( True, da.arange(2, dtype=bool, chunks=(2,)), np.bool_(True) ), ( False, da.arange(2, dtype=bool, chunks=(2,)), np.bool_(False) ), ( np.arange(2, dtype=bool), da.arange(2, dtype=bool, chunks=(2,)), np.arange(2, dtype=bool) ), ( da.arange(2, dtype=bool, chunks=(2,)), da.arange(2, dtype=bool, chunks=(2,)), da.arange(2, dtype=int, chunks=(2,)) ), ] ) def test__get_mask(expected, input, mask): result = _utils._get_mask(input, mask) assert type(expected) is type(result) if isinstance(expected, (np.ndarray, da.Array)): assert np.array((expected == result).all())[()] else: assert expected == result @pytest.mark.parametrize( "expected, border_value", [ (False, False), (True, True), (False, 0), (True, 1), (True, 5), (True, -2), ] ) def test__get_border_value(expected, border_value): assert expected == _utils._get_border_value(border_value) @pytest.mark.parametrize( "expected, brute_force", [ (False, False), ] ) def test__get_brute_force(expected, brute_force): assert expected == _utils._get_brute_force(brute_force) ================================================ FILE: tests/test_dask_image/test_ndmorph/test_cupy_ndmorph.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import dask.array as da import numpy as np import pytest import dask_image.ndmorph cupy = pytest.importorskip("cupy", minversion="9.0.0") @pytest.fixture def array(): s = (10, 10) a = da.from_array(cupy.arange(int(np.prod(s)), dtype=cupy.float32).reshape(s), chunks=5) return a @pytest.mark.cupy @pytest.mark.parametrize("func", [ dask_image.ndmorph.binary_closing, dask_image.ndmorph.binary_dilation, dask_image.ndmorph.binary_erosion, dask_image.ndmorph.binary_opening, ]) def test_cupy_ndmorph(array, func): """Test convolve & correlate filters with cupy input arrays.""" result = func(array) assert result.dtype == bool assert result._meta.dtype == bool assert isinstance(result._meta, cupy.ndarray) computed = result.compute() assert computed.dtype == bool assert isinstance(computed, cupy.ndarray) ================================================ FILE: tests/test_dask_image/test_ndmorph/test_ndmorph.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import numpy as np import scipy.ndimage import dask.array as da import dask_image.ndmorph @pytest.mark.parametrize( "funcname", [ "binary_closing", "binary_dilation", "binary_erosion", "binary_opening", ] ) @pytest.mark.parametrize( "err_type, input, structure, origin", [ ( RuntimeError, da.ones([1, 2], dtype=bool, chunks=(1, 2,)), da.arange(2, dtype=bool, chunks=(2,)), 0 ), ( TypeError, da.arange(2, dtype=bool, chunks=(2,)), 2.0, 0 ), ( TypeError, da.ones([2], dtype=bool, chunks=(2,)), da.arange(2, dtype=bool, chunks=(2,)), 0.0 ), ] ) def test_errs_binary_ops(funcname, err_type, input, structure, origin): da_func = getattr(dask_image.ndmorph, funcname) with pytest.raises(err_type): da_func( input, structure=structure, origin=origin ) @pytest.mark.parametrize( "funcname", [ "binary_closing", "binary_dilation", "binary_erosion", "binary_opening", ] ) @pytest.mark.parametrize( "err_type, input, structure, iterations, origin", [ ( TypeError, da.ones([2], dtype=bool, chunks=(2,)), da.arange(2, dtype=bool, chunks=(2,)), 1.0, 0 ), ( NotImplementedError, da.ones([2], dtype=bool, chunks=(2,)), da.arange(2, dtype=bool, chunks=(2,)), 0, 0 ) ] ) def test_errs_binary_ops_iter(funcname, err_type, input, structure, iterations, origin): da_func = getattr(dask_image.ndmorph, funcname) with pytest.raises(err_type): da_func( input, structure=structure, iterations=iterations, origin=origin ) @pytest.mark.parametrize( "funcname", [ "binary_closing", "binary_dilation", "binary_erosion", "binary_opening", ] ) @pytest.mark.parametrize( "err_type, input, structure, iterations, mask, border_value, origin" ", brute_force", [ ( RuntimeError, da.ones([2], dtype=bool, chunks=(2,)), da.arange(2, dtype=bool, chunks=(2,)), 1, da.arange(2, dtype=bool, chunks=(2,))[None], 0, 0, False ), ( TypeError, da.ones([2], dtype=bool, chunks=(2,)), da.arange(2, dtype=bool, chunks=(2,)), 1, da.arange(2, dtype=bool, chunks=(2,)), 2.0, 0, False ), ( NotImplementedError, da.ones([2], dtype=bool, chunks=(2,)), da.arange(2, dtype=bool, chunks=(2,)), 1, da.arange(2, dtype=bool, chunks=(2,)), 0, 0, True ), ] ) def test_errs_binary_ops_expanded(funcname, err_type, input, structure, iterations, mask, border_value, origin, brute_force): da_func = getattr(dask_image.ndmorph, funcname) with pytest.raises(err_type): da_func( input, structure=structure, iterations=iterations, mask=mask, border_value=border_value, origin=origin, brute_force=brute_force ) @pytest.mark.parametrize( "funcname", [ "binary_closing", "binary_dilation", "binary_erosion", "binary_opening", ] ) @pytest.mark.parametrize( "input, structure, origin", [ ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), None, 0 ), ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), np.ones([3, 3], dtype=bool), 0 ), ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), np.ones([3, 3], dtype=bool), 1 ), ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), np.ones([3, 3], dtype=bool), -1 ), ] ) def test_binary_ops(funcname, input, structure, origin): da_func = getattr(dask_image.ndmorph, funcname) sp_func = getattr(scipy.ndimage, funcname) da_result = da_func( input, structure=structure, origin=origin ) sp_result = sp_func( input, structure=structure, origin=origin ) da.utils.assert_eq(sp_result, da_result) @pytest.mark.parametrize( "funcname", [ "binary_closing", "binary_dilation", "binary_erosion", "binary_opening", ] ) @pytest.mark.parametrize( "input, structure, iterations, origin", [ ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), np.ones([3, 3], dtype=bool), 3, 0 ), ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), np.ones([3, 3], dtype=bool), 3, 1 ), ] ) def test_binary_ops_iter(funcname, input, structure, iterations, origin): da_func = getattr(dask_image.ndmorph, funcname) sp_func = getattr(scipy.ndimage, funcname) da_result = da_func( input, structure=structure, iterations=iterations, origin=origin ) sp_result = sp_func( input, structure=structure, iterations=iterations, origin=origin ) da.utils.assert_eq(sp_result, da_result) @pytest.mark.parametrize( "funcname", [ "binary_closing", "binary_dilation", "binary_erosion", "binary_opening", ] ) @pytest.mark.parametrize( "input, structure, iterations, mask, border_value, origin, brute_force", [ ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), np.ones([3, 3], dtype=bool), 1, None, 1, 0, False ), ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), np.ones([3, 3], dtype=bool), 1, da.from_array( np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool ), chunks=(5, 6) ), 0, 0, False ), ( da.from_array( np.array( [[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]], dtype=bool ), chunks=(5, 6) ), np.ones([3, 3], dtype=bool), 3, da.from_array( np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool ), chunks=(5, 6) ), 0, 0, False ), ] ) def test_binary_ops_expanded(funcname, input, structure, iterations, mask, border_value, origin, brute_force): da_func = getattr(dask_image.ndmorph, funcname) sp_func = getattr(scipy.ndimage, funcname) da_result = da_func( input, structure=structure, iterations=iterations, mask=mask, border_value=border_value, origin=origin, brute_force=brute_force ) sp_result = sp_func( input, structure=structure, iterations=iterations, mask=mask, border_value=border_value, origin=origin, brute_force=brute_force ) da.utils.assert_eq(sp_result, da_result)