Showing preview only (1,997K chars total). Download the full file or copy to clipboard to get everything.
Repository: pySTEPS/pysteps
Branch: master
Commit: 1d472a50354c
Files: 264
Total size: 1.9 MB
Directory structure:
gitextract_7wafhhns/
├── .github/
│ └── workflows/
│ ├── check_black.yml
│ ├── python-publish.yml
│ └── test_pysteps.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── CITATION.bib
├── CONTRIBUTING.rst
├── LICENSE
├── MANIFEST.in
├── PKG-INFO
├── README.rst
├── ci/
│ ├── ci_test_env.yml
│ ├── fetch_pysteps_data.py
│ └── test_plugin_support.py
├── doc/
│ ├── .gitignore
│ ├── Makefile
│ ├── _static/
│ │ └── pysteps.css
│ ├── _templates/
│ │ └── layout.html
│ ├── make.bat
│ ├── rebuild_docs.sh
│ ├── requirements.txt
│ └── source/
│ ├── conf.py
│ ├── developer_guide/
│ │ ├── build_the_docs.rst
│ │ ├── contributors_guidelines.rst
│ │ ├── importer_plugins.rst
│ │ ├── pypi.rst
│ │ ├── test_pysteps.rst
│ │ └── update_conda_forge.rst
│ ├── index.rst
│ ├── pysteps_reference/
│ │ ├── blending.rst
│ │ ├── cascade.rst
│ │ ├── datasets.rst
│ │ ├── decorators.rst
│ │ ├── downscaling.rst
│ │ ├── extrapolation.rst
│ │ ├── feature.rst
│ │ ├── index.rst
│ │ ├── io.rst
│ │ ├── motion.rst
│ │ ├── noise.rst
│ │ ├── nowcasts.rst
│ │ ├── postprocessing.rst
│ │ ├── pysteps.rst
│ │ ├── timeseries.rst
│ │ ├── tracking.rst
│ │ ├── utils.rst
│ │ ├── verification.rst
│ │ └── visualization.rst
│ ├── references.bib
│ ├── user_guide/
│ │ ├── example_data.rst
│ │ ├── install_pysteps.rst
│ │ ├── machine_learning_pysteps.rst
│ │ ├── pystepsrc_example.rst
│ │ └── set_pystepsrc.rst
│ └── zz_bibliography.rst
├── environment.yml
├── environment_dev.yml
├── examples/
│ ├── LK_buffer_mask.py
│ ├── README.txt
│ ├── advection_correction.py
│ ├── anvil_nowcast.py
│ ├── data_transformations.py
│ ├── ens_kalman_filter_blended_forecast.py
│ ├── linda_nowcasts.py
│ ├── my_first_nowcast.ipynb
│ ├── optical_flow_methods_convergence.py
│ ├── plot_cascade_decomposition.py
│ ├── plot_custom_precipitation_range.py
│ ├── plot_ensemble_verification.py
│ ├── plot_extrapolation_nowcast.py
│ ├── plot_linear_blending.py
│ ├── plot_noise_generators.py
│ ├── plot_optical_flow.py
│ ├── plot_steps_nowcast.py
│ ├── probability_forecast.py
│ ├── rainfarm_downscale.py
│ ├── steps_blended_forecast.py
│ └── thunderstorm_detection_and_tracking.py
├── pyproject.toml
├── pysteps/
│ ├── __init__.py
│ ├── blending/
│ │ ├── __init__.py
│ │ ├── clim.py
│ │ ├── ens_kalman_filter_methods.py
│ │ ├── interface.py
│ │ ├── linear_blending.py
│ │ ├── pca_ens_kalman_filter.py
│ │ ├── skill_scores.py
│ │ ├── steps.py
│ │ └── utils.py
│ ├── cascade/
│ │ ├── __init__.py
│ │ ├── bandpass_filters.py
│ │ ├── decomposition.py
│ │ └── interface.py
│ ├── datasets.py
│ ├── decorators.py
│ ├── downscaling/
│ │ ├── __init__.py
│ │ ├── interface.py
│ │ └── rainfarm.py
│ ├── exceptions.py
│ ├── extrapolation/
│ │ ├── __init__.py
│ │ ├── interface.py
│ │ └── semilagrangian.py
│ ├── feature/
│ │ ├── __init__.py
│ │ ├── blob.py
│ │ ├── interface.py
│ │ ├── shitomasi.py
│ │ └── tstorm.py
│ ├── io/
│ │ ├── __init__.py
│ │ ├── archive.py
│ │ ├── exporters.py
│ │ ├── importers.py
│ │ ├── interface.py
│ │ ├── mch_lut_8bit_Metranet_AZC_V104.txt
│ │ ├── mch_lut_8bit_Metranet_v103.txt
│ │ ├── nowcast_importers.py
│ │ └── readers.py
│ ├── motion/
│ │ ├── __init__.py
│ │ ├── _proesmans.pyx
│ │ ├── _vet.pyx
│ │ ├── constant.py
│ │ ├── darts.py
│ │ ├── farneback.py
│ │ ├── interface.py
│ │ ├── lucaskanade.py
│ │ ├── proesmans.py
│ │ └── vet.py
│ ├── noise/
│ │ ├── __init__.py
│ │ ├── fftgenerators.py
│ │ ├── interface.py
│ │ ├── motion.py
│ │ └── utils.py
│ ├── nowcasts/
│ │ ├── __init__.py
│ │ ├── anvil.py
│ │ ├── extrapolation.py
│ │ ├── interface.py
│ │ ├── lagrangian_probability.py
│ │ ├── linda.py
│ │ ├── sprog.py
│ │ ├── sseps.py
│ │ ├── steps.py
│ │ └── utils.py
│ ├── postprocessing/
│ │ ├── __init__.py
│ │ ├── diagnostics.py
│ │ ├── ensemblestats.py
│ │ ├── interface.py
│ │ └── probmatching.py
│ ├── pystepsrc
│ ├── pystepsrc_schema.json
│ ├── scripts/
│ │ ├── __init__.py
│ │ ├── fit_vel_pert_params.py
│ │ └── run_vel_pert_analysis.py
│ ├── tests/
│ │ ├── __init__.py
│ │ ├── helpers.py
│ │ ├── test_archive.py
│ │ ├── test_blending_clim.py
│ │ ├── test_blending_linear_blending.py
│ │ ├── test_blending_pca_ens_kalman_filter.py
│ │ ├── test_blending_skill_scores.py
│ │ ├── test_blending_steps.py
│ │ ├── test_blending_utils.py
│ │ ├── test_cascade.py
│ │ ├── test_datasets.py
│ │ ├── test_decorators.py
│ │ ├── test_downscaling_rainfarm.py
│ │ ├── test_ensscores.py
│ │ ├── test_exporters.py
│ │ ├── test_extrapolation_semilagrangian.py
│ │ ├── test_feature.py
│ │ ├── test_feature_tstorm.py
│ │ ├── test_importer_decorator.py
│ │ ├── test_interfaces.py
│ │ ├── test_io_archive.py
│ │ ├── test_io_bom_rf3.py
│ │ ├── test_io_dwd_hdf5.py
│ │ ├── test_io_fmi_geotiff.py
│ │ ├── test_io_fmi_pgm.py
│ │ ├── test_io_knmi_hdf5.py
│ │ ├── test_io_mch_gif.py
│ │ ├── test_io_mrms_grib.py
│ │ ├── test_io_nowcast_importers.py
│ │ ├── test_io_opera_hdf5.py
│ │ ├── test_io_readers.py
│ │ ├── test_io_saf_crri.py
│ │ ├── test_motion.py
│ │ ├── test_motion_farneback.py
│ │ ├── test_motion_lk.py
│ │ ├── test_noise_fftgenerators.py
│ │ ├── test_noise_motion.py
│ │ ├── test_nowcasts_anvil.py
│ │ ├── test_nowcasts_lagrangian_probability.py
│ │ ├── test_nowcasts_linda.py
│ │ ├── test_nowcasts_sprog.py
│ │ ├── test_nowcasts_sseps.py
│ │ ├── test_nowcasts_steps.py
│ │ ├── test_nowcasts_utils.py
│ │ ├── test_paramsrc.py
│ │ ├── test_plt_animate.py
│ │ ├── test_plt_cartopy.py
│ │ ├── test_plt_motionfields.py
│ │ ├── test_plt_precipfields.py
│ │ ├── test_plugins_support.py
│ │ ├── test_postprocessing_ensemblestats.py
│ │ ├── test_postprocessing_probmatching.py
│ │ ├── test_timeseries_autoregression.py
│ │ ├── test_tracking_tdating.py
│ │ ├── test_utils_arrays.py
│ │ ├── test_utils_cleansing.py
│ │ ├── test_utils_conversion.py
│ │ ├── test_utils_dimension.py
│ │ ├── test_utils_interpolate.py
│ │ ├── test_utils_pca.py
│ │ ├── test_utils_reprojection.py
│ │ ├── test_utils_spectral.py
│ │ ├── test_utils_transformation.py
│ │ ├── test_verification_detcatscores.py
│ │ ├── test_verification_detcontscores.py
│ │ ├── test_verification_probscores.py
│ │ ├── test_verification_salscores.py
│ │ └── test_verification_spatialscores.py
│ ├── timeseries/
│ │ ├── __init__.py
│ │ ├── autoregression.py
│ │ └── correlation.py
│ ├── tracking/
│ │ ├── __init__.py
│ │ ├── interface.py
│ │ ├── lucaskanade.py
│ │ └── tdating.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── arrays.py
│ │ ├── check_norain.py
│ │ ├── cleansing.py
│ │ ├── conversion.py
│ │ ├── dimension.py
│ │ ├── fft.py
│ │ ├── images.py
│ │ ├── interface.py
│ │ ├── interpolate.py
│ │ ├── pca.py
│ │ ├── reprojection.py
│ │ ├── spectral.py
│ │ ├── tapering.py
│ │ └── transformation.py
│ ├── verification/
│ │ ├── __init__.py
│ │ ├── detcatscores.py
│ │ ├── detcontscores.py
│ │ ├── ensscores.py
│ │ ├── interface.py
│ │ ├── lifetime.py
│ │ ├── plots.py
│ │ ├── probscores.py
│ │ ├── salscores.py
│ │ └── spatialscores.py
│ └── visualization/
│ ├── __init__.py
│ ├── animations.py
│ ├── basemaps.py
│ ├── motionfields.py
│ ├── precipfields.py
│ ├── spectral.py
│ ├── thunderstorms.py
│ └── utils.py
├── requirements.txt
├── requirements_dev.txt
├── setup.py
└── tox.ini
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/check_black.yml
================================================
# This workflow will test the code base using the LATEST version of black
# IMPORTANT: Black is under development. Hence, minor formatting changes between
# different versions are expected.
# If this test fails, install the latest version of black and then run black.
# Preferably, run black only on the files that you have modified.
# This will facilitate the revision of the proposed changes.
name: Check Black
on:
# Triggers the workflow on push or pull request events but only for the master branch
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install black
- name: Black version
run: black --version
- name: Black check
working-directory: ${{github.workspace}}
run: black --check .
================================================
FILE: .github/workflows/python-publish.yml
================================================
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
name: Upload Python Package
on:
release:
types: [published]
jobs:
deploy:
runs-on: ubuntu-latest
permissions:
id-token: write
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools wheel numpy cython
- name: Build
run: |
python setup.py sdist
- name: Publish
uses: pypa/gh-action-pypi-publish@release/v1
================================================
FILE: .github/workflows/test_pysteps.yml
================================================
name: Test pysteps
on:
# Triggers the workflow on push or pull request events to the master branch
push:
branches:
- master
- pysteps-v2
pull_request:
branches:
- master
- pysteps-v2
jobs:
unit_tests:
name: Unit Tests (${{ matrix.python-version }}, ${{ matrix.os }})
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ "ubuntu-latest", "macos-latest", "windows-latest" ]
python-version: ["3.11", "3.13"]
max-parallel: 6
defaults:
run:
shell: bash -l {0}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
# need headless opencv on Linux, see https://github.com/conda-forge/opencv-feedstock/issues/401
- name: Install mamba and create environment for Linux
if: matrix.os == 'ubuntu-latest'
uses: mamba-org/setup-micromamba@v1
with:
# https://github.com/mamba-org/setup-micromamba/issues/225
micromamba-version: 1.5.10-0
environment-file: ci/ci_test_env.yml
environment-name: test_environment
generate-run-shell: false
create-args: >-
python=${{ matrix.python-version }}
libopencv=*=headless*
- name: Install mamba and create environment (not Linux)
if: matrix.os != 'ubuntu-latest'
uses: mamba-org/setup-micromamba@v1
with:
# https://github.com/mamba-org/setup-micromamba/issues/225
micromamba-version: 1.5.10-0
environment-file: ci/ci_test_env.yml
environment-name: test_environment
generate-run-shell: false
create-args: python=${{ matrix.python-version }}
- name: Install pygrib (not win)
if: matrix.os != 'windows-latest'
run: mamba install --quiet pygrib
- name: Install pysteps for MacOS
if: matrix.os == 'macos-latest'
working-directory: ${{github.workspace}}
env:
CC: gcc-13
CXX: g++-13
CXX1X: g++-13
HOMEBREW_NO_INSTALL_CLEANUP: 1
run: |
brew update-reset
brew update
gcc-13 --version || brew install gcc@13
pip install .
- name: Install pysteps
if: matrix.os != 'macos-latest'
working-directory: ${{github.workspace}}
run: pip install .
- name: Download pysteps data
env:
PYSTEPS_DATA_PATH: ${{github.workspace}}/pysteps_data
working-directory: ${{github.workspace}}/ci
run: python fetch_pysteps_data.py
- name: Check imports
working-directory: ${{github.workspace}}/pysteps_data
run: |
python --version
python -c "import pysteps; print(pysteps.__file__)"
python -c "from pysteps import motion"
python -c "from pysteps.motion import vet"
python -c "from pysteps.motion import proesmans"
- name: Run tests and coverage report
working-directory: ${{github.workspace}}/pysteps_data
env:
PYSTEPSRC: ${{github.workspace}}/pysteps_data/pystepsrc
run: pytest --pyargs pysteps --cov=pysteps --cov-report=xml --cov-report=term -ra
- name: Upload coverage to Codecov (Linux only)
if: matrix.os == 'ubuntu-latest'
uses: codecov/codecov-action@v4
env:
OS: ${{ matrix.os }}
PYTHON: ${{ matrix.python-version }}
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${{github.workspace}}/pysteps_data/coverage.xml
flags: unit_tests
env_vars: OS,PYTHON
fail_ci_if_error: true
verbose: true
================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
*.c
# Distribution / packaging
.Python
.tox
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Sphinx documentation
docs/_build/
doc/_build/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Pycharm
.idea
# Spyder project settings
.spyderproject
.spyproject
# VSCode
.vscode
# Rope project settings
.ropeproject
# mypy
.mypy_cache/
# Mac OS Stuff
.DS_Store
# Running local tests
/tmp
/pysteps/tests/tmp/
================================================
FILE: .pre-commit-config.yaml
================================================
repos:
- repo: https://github.com/psf/black
rev: 26.1.0
hooks:
- id: black
language_version: python3
================================================
FILE: .readthedocs.yml
================================================
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
# the build.os and build.tools section is mandatory
build:
os: "ubuntu-22.04"
tools:
python: "3.11"
sphinx:
configuration: doc/source/conf.py
formats:
- htmlzip
python:
install:
- requirements: requirements.txt
- requirements: doc/requirements.txt
- method: pip
path: .
================================================
FILE: CITATION.bib
================================================
@Article{gmd-12-4185-2019,
AUTHOR = {Pulkkinen, S. and Nerini, D. and P\'erez Hortal, A. A. and Velasco-Forero, C. and Seed, A. and Germann, U. and Foresti, L.},
TITLE = {Pysteps: an open-source Python library for probabilistic precipitation nowcasting (v1.0)},
JOURNAL = {Geoscientific Model Development},
VOLUME = {12},
YEAR = {2019},
NUMBER = {10},
PAGES = {4185--4219},
URL = {https://gmd.copernicus.org/articles/12/4185/2019/},
DOI = {10.5194/gmd-12-4185-2019}
}
@article{qj.4461,
AUTHOR = {Imhoff, Ruben O. and De Cruz, Lesley and Dewettinck, Wout and Brauer, Claudia C. and Uijlenhoet, Remko and van Heeringen, Klaas-Jan and Velasco-Forero, Carlos and Nerini, Daniele and Van Ginderachter, Michiel and Weerts, Albrecht H.},
TITLE = {Scale-dependent blending of ensemble rainfall nowcasts and NWP in the open-source pysteps library},
JOURNAL = {Quarterly Journal of the Royal Meteorological Society},
VOLUME = {n/a},
NUMBER = {n/a},
YEAR = {2023},
PAGES ={1--30},
DOI = {https://doi.org/10.1002/qj.4461},
URL = {https://rmets.onlinelibrary.wiley.com/doi/abs/10.1002/qj.4461},
}
================================================
FILE: CONTRIBUTING.rst
================================================
Contributing to pysteps
=======================
Welcome! Pysteps is a community-driven initiative for developing and
maintaining an easy to use, modular, free and open-source Python
framework for short-term ensemble prediction systems.
There are many ways to contribute to pysteps:
* contributing bug reports and feature requests
* contributing documentation
* code contributions, new features, or bug fixes
* contribute with usage examples
Workflow for code contributions
-------------------------------
We welcome all kinds of contributions, like documentation updates, bug fixes, or new features.
The workflow for the contributions uses the usual
`GitHub pull-request flow <https://help.github.com/en/articles/github-flow>`_.
If you have ideas for new contributions to the project, feel free to get in touch with the pysteps community on our
`pysteps slack <https://pysteps.slack.com/>`__.
To get access to it, you need to ask for an invitation or you can use the automatic invitation page
`here <https://pysteps-slackin.herokuapp.com/>`__.
Our slack channel is a great place for preliminary discussions about new features or functionalities.
Another place where you can report bugs and suggest new enhancements is the
`project's issue tracker <https://github.com/pySTEPS/pysteps/issues>`_.
First Time Contributors
-----------------------
If you are interested in helping to improve pysteps,
the best way to get started is by looking for "Good First Issue" in the
`issue tracker <https://github.com/pySTEPS/pysteps/issues>`_.
In a nutshell, the main steps to follow for contributing to pysteps are:
* Setting up the development environment
* Fork the repository
* Install pre-commit hooks
* Create a new branch for each contribution
* Read the Code Style guide
* Work on your changes
* Test your changes
* Push to your fork repository and create a new PR in GitHub.
Setting up the Development environment
--------------------------------------
The recommended way to setup up the developer environment is the Anaconda
(commonly referred to as Conda).
Conda quickly installs, runs, and updates packages and their dependencies.
It also allows you to create, save, load, and switch between different environments on your local computer.
Before continuing, Mac OSX users also need to install a more recent compiler.
See instructions `here <https://pysteps.readthedocs.io/en/latest/user_guide/install_pysteps.html#install-osx-users>`__.
The developer environment can be created from the file
`environment_dev.yml <https://github.com/pySTEPS/pysteps/blob/master/environment_dev.yml>`_
in the project's root directory by running the command::
conda env create -f environment_dev.yml
This will create the **pysteps_dev** environment that can be activated using::
conda activate pysteps_dev
Once the environment is activated, the latest version of pysteps can be installed
in development mode, in such a way that the project appears to be installed,
but yet is still editable from the source tree::
pip install -e <path to local pysteps repo>
To test if the installation went fine, you can try importing pysteps from the python interpreter by running::
python -c "import pysteps"
Fork the repository
~~~~~~~~~~~~~~~~~~~
Once you have set the development environment, the next step is creating your local copy of the repository, where you will commit your modifications.
The steps to follow are:
#. Set up Git on your computer.
#. Create a GitHub account (if you don't have one).
#. Fork the repository in your GitHub.
#. Clone a local copy of your fork. For example::
git clone https://github.com/<your-account>/pysteps.git
Done!, now you have a local copy of pysteps git repository.
If you are new to GitHub, below you can find a list of helpful tutorials:
- http://rogerdudler.github.io/git-guide/index.html
- https://www.atlassian.com/git/tutorials
Install pre-commit hooks
~~~~~~~~~~~~~~~~~~~~~~~~
After setting up your development environment, install the git pre-commit hook by executing the following command in the repository's
root::
pre-commit install
The pre-commit hooks are scripts executed automatically in every commit to identify simple issues with the code.
When an issue is identified (the pre-commit script exits with non-zero status), the hook aborts the commit and prints the error.
Currently, pysteps only tests that the code to be committed complies with black's format style.
In case that the commit is aborted, you only need to run black in the entire source code.
This can be done by running :code:`black .` or :code:`pre-commit run --all-files`.
The latter is recommended since it indicates if the commit contained any formatting errors (that are automatically corrected).
Black's configuration is stored in the `pyproject.toml` file to ensure that the same configuration is used in every development environment.
This configuration is automatically loaded when black is run from any directory in the
pysteps project.
IMPORTANT: Periodically update the black version used in the pre-commit hook by running::
pre-commit autoupdate
For more information about git hooks and the pre-commit package, see:
- https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks
- https://pre-commit.com/
Create a new branch
~~~~~~~~~~~~~~~~~~~
As a collaborator, all the new contributions you want should be made in a new branch under your forked repository.
Working on the master branch is reserved for Core Contributors only.
Core Contributors are developers that actively work and maintain the repository.
They are the only ones who accept pull requests and push commits directly to the pysteps repository.
For more information on how to create and work with branches, see
`"Branches in a Nutshell" <https://git-scm.com/book/en/v2/Git-Branching-Branches-in-a-Nutshell>`__ in the Git documentation
Code Style
----------
We strongly suggest following the
`PEP8 coding standards <https://www.python.org/dev/peps/pep-0008/>`__.
Since PEP8 is a set of recommendations, these are the most important good coding practices for the pysteps project:
* Always use four spaces for indentation (don’t use tabs).
* Max line-length: 88 characters (note that we don't use the PEP8's 79 value). Enforced by `black`.
* Always indent wrapped code for readability. Enforced by `black`.
* Avoid extraneous whitespace. Enforced by `black`.
* Don’t use whitespace to line up assignment operators (=, :). Enforced by `black`.
* Avoid writing multiple statements in the same line.
* Naming conventions should follow the recommendations from
the `Google's python style guide <http://google.github.io/styleguide/pyguide.html>`__, summarized as follows:
.. raw:: html
<table rules="all" border="1" cellspacing="2" cellpadding="2">
<tr>
<th>Type</th>
<th>Public</th>
<th>Internal</th>
</tr>
<tr>
<td>Packages</td>
<td><code>lower_with_under</code></td>
<td></td>
</tr>
<tr>
<td>Modules</td>
<td><code>lower_with_under</code></td>
<td><code>_lower_with_under</code></td>
</tr>
<tr>
<td>Classes</td>
<td><code>CapWords</code></td>
<td><code>_CapWords</code></td>
</tr>
<tr>
<td>Exceptions</td>
<td><code>CapWords</code></td>
<td></td>
</tr>
<tr>
<td>Functions</td>
<td><code>lower_with_under()</code></td>
<td><code>_lower_with_under()</code></td>
</tr>
<tr>
<td>Global/Class Constants</td>
<td><code>CAPS_WITH_UNDER</code></td>
<td><code>_CAPS_WITH_UNDER</code></td>
</tr>
<tr>
<td>Global/Class Variables</td>
<td><code>lower_with_under</code></td>
<td><code>_lower_with_under</code></td>
</tr>
<tr>
<td>Instance Variables</td>
<td><code>lower_with_under</code></td>
<td><code>_lower_with_under</code> (protected)</td>
</tr>
<tr>
<td>Method Names</td>
<td><code>lower_with_under()</code></td>
<td><code>_lower_with_under()</code> (protected)</td>
</tr>
<tr>
<td>Function/Method Parameters</td>
<td><code>lower_with_under</code></td>
<td></td>
</tr>
<tr>
<td>Local Variables</td>
<td><code>lower_with_under</code></td>
<td></td>
</tr>
</table>
(source: `Section 3.16.4, Google's python style guide <http://google.github.io/styleguide/pyguide.html>`__)
- If you need to ignore part of the variables returned by a function,
use "_" (single underscore) or __ (double underscore)::
precip, __, metadata = import_bom_rf3('example_file.bom')
precip2, _, metadata2 = import_bom_rf3('example_file2.bom')
- Zen of Python (`PEP 20 <https://www.python.org/dev/peps/pep-0020/>`__), the guiding principles for Python’s
design::
>>> import this
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
For more suggestions on good coding practices for python, check these guidelines:
- `The Hitchhiker's Guide to Python <https://docs.python-guide.org/writing/style/>`__
- `Google's python style guide <http://google.github.io/styleguide/pyguide.html>`__
- `PEP8 <https://www.python.org/dev/peps/pep-0008/>`__
**Using Black auto-formatter**
To ensure a minimal style consistency, we use
`black <https://black.readthedocs.io/en/stable/>`__ to auto-format to the source code.
The black configuration used in the pysteps project is defined in the pyproject.toml,
and it is automatically detected by black.
Black can be installed using any of the following::
conda install black
#For the latest version:
conda install -c conda-forge black
pip install black
Check the `official documentation <https://black.readthedocs.io/en/stable/the_black_code_style.html>`__
for more information.
**Docstrings**
Every module, function, or class must have a docstring that describe its
purpose and how to use it. The docstrings follows the conventions described in the
`PEP 257 <https://www.python.org/dev/peps/pep-0257/#multi-line-docstrings>`__
and the
`Numpy's docstrings format <https://numpydoc.readthedocs.io/en/latest/format.html>`__.
Here is a summary of the most important rules:
- Always use triple quotes for docstrings, even if it fits a single line.
- For one-line docstring, end the phrase with a period.
- Use imperative mood for all docstrings ("""Return some value.""") rather than descriptive mood
("""Returns some value.""").
Here is an example of a docstring::
def adjust_lag2_corrcoef1(gamma_1, gamma_2):
"""
A simple adjustment of lag-2 temporal autocorrelation coefficient to
ensure that the resulting AR(2) process is stationary when the parameters
are estimated from the Yule-Walker equations.
Parameters
----------
gamma_1 : float
Lag-1 temporal autocorrelation coefficient.
gamma_2 : float
Lag-2 temporal autocorrelation coefficient.
Returns
-------
out : float
The adjusted lag-2 correlation coefficient.
"""
Contributions guidelines
------------------------
The collaborator guidelines used in pysteps were largely inspired by those of the
`MyPy project <https://github.com/python/mypy>`__.
Collaborators guidelines
~~~~~~~~~~~~~~~~~~~~~~~~
As a collaborator, all your new contributions should be made in a new branch under your forked repository.
Working on the master branch is reserved for Core Contributors, and only for small changes.
Core Contributors are developers that actively work and maintain the repository.
They are the only ones who accept pull requests and push commits directly to
the **pysteps** repository.
**IMPORTANT**
However, for contributions that require a significant amount of work, we strongly suggest opening a new issue with
the **enhancement** or **discussion** tag to encourage discussions.
The discussions will help clarify the best way to approach the suggested changes or raise potential concerns.
For code contributions, collaborators can use the usual
`GitHub pull-request flow <https://help.github.com/en/articles/github-flow>`__.
Once your proposed changes are ready, you need to create a pull request (PR) from your fork in your GitHub account.
Afterward, core contributors will review your proposed changes, provide feedback in the PR discussion, and maybe,
request changes to the code. Once the PR is ready, a Core Developer will merge the changes into the main branch.
**Important:**
It is strongly suggested that each PR only addresses a single objective (e.g., fix a bug, improve documentation, etc.).
This will help to reduce the time needed to process the PR. For changes outside the PR's objectives, we highly
recommend opening a new PR.
Testing your changes
~~~~~~~~~~~~~~~~~~~~
Before committing changes or creating pull requests, check that all the tests in the pysteps suite pass.
See the `Testing pysteps <https://pysteps.readthedocs.io/en/latest/developer_guide/test_pysteps.html#testing-pysteps>`__
for detailed instruction to run the tests.
Although it is not strictly needed, we suggest creating minimal tests for new contributions to ensure that it achieves
the desired behavior. Pysteps uses the pytest framework, which is easy to use and also supports complex functional
testing for applications and libraries.
Check the `pytests official documentation <https://docs.pytest.org/en/latest/index.html>`_ for more information.
The tests should be placed under the
`pysteps.tests <https://github.com/pySTEPS/pysteps/tree/master/pysteps/tests>`_
module.
The file should follow the **test_*.py** naming convention and have a
descriptive name.
A quick way to get familiar with the pytest syntax and the testing procedures
is checking the python scripts present in the pysteps test module.
Core developer guidelines
~~~~~~~~~~~~~~~~~~~~~~~~~
Working directly on the master branch is discouraged and is reserved only
for small changes and updates that do not compromise the stability of the code.
The *master* branch is a production branch that is ready to be deployed
(cloned, installed, and ready to use).
In consequence, this master branch is meant to be stable.
The pysteps repository uses the GitHub Actions service to run tests every time you commit to GitHub.
In that way, your modifications along with the entire library are tested.
Pushing untested or work-in-progress changes to the master branch can potentially introduce bugs or break the stability of the package.
Since the tests triggered by a commit to the master branch take around 20 minutes, any errors introduced there
will only be noticed after the stability of the master branch has been compromised.
In addition, other developers start working on a new feature from master from a potentially broken state.
Instead, it is recommended to work on each new feature in its own branch, which can be pushed to the central repository
for backup/collaboration. When you’re done with the feature's development work, you can merge the feature branch into the
master or submit a Pull Request. This approach has two main advantages:
- Every commit on the feature branch is tested via GitHub Actions.
If the tests fail, they do not affect the **master** branch.
- Once the changes are finished and the tests passed, the commits history can be squashed into a single commit and
then merged into the master branch. Squashing the commits helps to keep a clean commit history in the main branch.
Processing pull requests
~~~~~~~~~~~~~~~~~~~~~~~~
.. _`Squash and merge`: https://github.com/blog/2141-squash-your-commits
To process the pull request, we follow similar rules to those used in the
`mypy developer guidelines <https://github.com/python/mypy/blob/master/CONTRIBUTING.md#core-developer-guidelines>`_:
* Always wait for tests to pass before merging PRs.
* Always use "`Squash and merge`_" to merge PRs.
* Make sure that the subject of the commit message summarizes the objective of the PR and does not finish with a dot.
* Write a new commit message before merging that provides a detailed description of the changes introduced by the PR.
Try to keep the maximum line length under 80 characters, splitting lines if necessary.
**IMPORTANT:** Make sure that the commit message doesn't contain the branch's commit history!
Also, if the PR fixes an issue, mention this explicitly.
* Use the imperative mood in the subject line (e.g. "Fix typo in README").
After the PR is merged, the merged branch can be safely deleted.
Preparing a new release
~~~~~~~~~~~~~~~~~~~~~~~
Core developers should follow the steps to prepare a new release (version):
1. Before creating the actual release in GitHub, be sure that every item in the following checklist was followed:
* In the file setup.py, update the **version="X.X.X"** keyword in the setup
function.
* Update the version in PKG-INFO file.
* If new dependencies were added to pysteps since the last release, add
them to the **environment.yml, requirements.txt**, and
**requirements_dev.txt** files.
#. Create a new release in GitHub following
`these guidelines <https://help.github.com/en/articles/creating-releases>`_.
Include a detailed changelog in the release.
#. Generating the source distribution for new pysteps version and upload it to
the `Python Package Index <https://pypi.org/>`_ (PyPI).
See `Packaging the pysteps project <https://pysteps.readthedocs.io/en/latest/developer_guide/pypi.html#pypi-relase>`__
for a detailed description of this process.
#. Update the conda-forge pysteps-feedstock following these guidelines:
`Updating the conda-forge pysteps-feedstock <https://pysteps.readthedocs.io/en/latest/developer_guide/update_conda_forge.html#update-conda-feedstock>`__
Credits
-------
This document was based on contributors guides of two Python
open-source projects:
* Py-Art_: Copyright (c) 2013, UChicago Argonne, LLC.
`License <https://github.com/ARM-DOE/pyart/blob/master/LICENSE.txt>`_.
* mypy_: Copyright (c) 2015-2016 Jukka Lehtosalo and contributors.
`MIT License <https://github.com/python/mypy/blob/master/LICENSE>`_.
* Official github documentation (https://help.github.com)
.. _Py-Art: https://github.com/ARM-DOE/pyart
.. _mypy: https://github.com/python/mypy
================================================
FILE: LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2019, PySteps developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: MANIFEST.in
================================================
include LICENSE *.txt *.rst
include pysteps/pystepsrc
include pysteps/pystepsrc_schema.json
include pysteps/io/mch_lut_8bit_Metranet_AZC_V104.txt
include pysteps/io/mch_lut_8bit_Metranet_v103.txt
recursive-include pysteps *.pyx
include pyproject.toml
================================================
FILE: PKG-INFO
================================================
Metadata-Version: 1.2
Name: pysteps
Version: 1.20.0
Summary: Python framework for short-term ensemble prediction systems
Home-page: http://pypi.python.org/pypi/pysteps/
License: LICENSE
Description: =======
pySteps
=======
The pysteps initiative is a community that develops and maintains an easy to
use, modular, free and open-source python framework for short-term ensemble
prediction systems.
The focus is on probabilistic nowcasting of radar precipitation fields,
but pysteps is designed to allow a wider range of uses.
Platform: UNKNOWN
================================================
FILE: README.rst
================================================
pysteps - Python framework for short-term ensemble prediction systems
=====================================================================
.. start-badges
.. list-table::
:stub-columns: 1
:widths: 10 90
* - docs
- |stable| |colab| |gallery|
* - status
- |test| |docs| |codecov| |codacy| |black|
* - package
- |github| |conda| |pypi| |zenodo|
* - community
- |contributors| |downloads| |license|
.. |docs| image:: https://readthedocs.org/projects/pysteps/badge/?version=latest
:alt: Documentation Status
:target: https://pysteps.readthedocs.io/
.. |test| image:: https://github.com/pySTEPS/pysteps/workflows/Test%20pysteps/badge.svg
:alt: Test pysteps
:target: https://github.com/pySTEPS/pysteps/actions?query=workflow%3A"Test+Pysteps"
.. |black| image:: https://github.com/pySTEPS/pysteps/workflows/Check%20Black/badge.svg
:alt: Check Black
:target: https://github.com/pySTEPS/pysteps/actions?query=workflow%3A"Check+Black"
.. |codecov| image:: https://codecov.io/gh/pySTEPS/pysteps/branch/master/graph/badge.svg
:alt: Coverage
:target: https://codecov.io/gh/pySTEPS/pysteps
.. |github| image:: https://img.shields.io/github/release/pySTEPS/pysteps.svg
:target: https://github.com/pySTEPS/pysteps/releases/latest
:alt: Latest github release
.. |conda| image:: https://anaconda.org/conda-forge/pysteps/badges/version.svg
:target: https://anaconda.org/conda-forge/pysteps
:alt: Anaconda Cloud
.. |pypi| image:: https://badge.fury.io/py/pysteps.svg
:target: https://pypi.org/project/pysteps/
:alt: Latest PyPI version
.. |license| image:: https://img.shields.io/badge/License-BSD%203--Clause-blue.svg
:alt: License
:target: https://opensource.org/licenses/BSD-3-Clause
.. |contributors| image:: https://img.shields.io/github/contributors/pySTEPS/pysteps
:alt: GitHub contributors
:target: https://github.com/pySTEPS/pysteps/graphs/contributors
.. |downloads| image:: https://img.shields.io/conda/dn/conda-forge/pysteps
:alt: Conda downloads
:target: https://anaconda.org/conda-forge/pysteps
.. |colab| image:: https://colab.research.google.com/assets/colab-badge.svg
:alt: My first nowcast
:target: https://colab.research.google.com/github/pySTEPS/pysteps/blob/master/examples/my_first_nowcast.ipynb
.. |gallery| image:: https://img.shields.io/badge/example-gallery-blue.svg
:alt: pysteps example gallery
:target: https://pysteps.readthedocs.io/en/stable/auto_examples/index.html
.. |stable| image:: https://img.shields.io/badge/docs-stable-blue.svg
:alt: pysteps documentation
:target: https://pysteps.readthedocs.io/en/stable/
.. |codacy| image:: https://api.codacy.com/project/badge/Grade/6cff9e046c5341a4afebc0347362f8de
:alt: Codacy Badge
:target: https://app.codacy.com/gh/pySTEPS/pysteps?utm_source=github.com&utm_medium=referral&utm_content=pySTEPS/pysteps&utm_campaign=Badge_Grade
.. |zenodo| image:: https://zenodo.org/badge/140263418.svg
:alt: DOI
:target: https://zenodo.org/badge/latestdoi/140263418
.. end-badges
What is pysteps?
================
Pysteps is an open-source and community-driven Python library for probabilistic precipitation nowcasting, i.e. short-term ensemble prediction systems.
The aim of pysteps is to serve two different needs. The first is to provide a modular and well-documented framework for researchers interested in developing new methods for nowcasting and stochastic space-time simulation of precipitation. The second aim is to offer a highly configurable and easily accessible platform for practitioners ranging from weather forecasters to hydrologists.
The pysteps library supports standard input/output file formats and implements several optical flow methods as well as advanced stochastic generators to produce ensemble nowcasts. In addition, it includes tools for visualizing and post-processing the nowcasts and methods for deterministic, probabilistic, and neighbourhood forecast verification.
Quick start
-----------
Use pysteps to compute and plot a radar extrapolation nowcast in Google Colab with `this interactive notebook <https://colab.research.google.com/github/pySTEPS/pysteps/blob/master/examples/my_first_nowcast.ipynb>`_.
Installation
============
The recommended way to install pysteps is with `conda <https://docs.conda.io/>`_ from the conda-forge channel::
$ conda install -c conda-forge pysteps
More details can be found in the `installation guide <https://pysteps.readthedocs.io/en/stable/user_guide/install_pysteps.html>`_.
Usage
=====
Have a look at the `gallery of examples <https://pysteps.readthedocs.io/en/stable/auto_examples/index.html>`__ to get a good overview of what pysteps can do.
For a more detailed description of all the available methods, check the `API reference <https://pysteps.readthedocs.io/en/stable/pysteps_reference/index.html>`_ page.
Example data
============
A set of example radar data is available in a separate repository: `pysteps-data <https://github.com/pySTEPS/pysteps-data>`_.
More information on how to download and install them is available `here <https://pysteps.readthedocs.io/en/stable/user_guide/example_data.html>`_.
Contributions
=============
*We welcome contributions!*
For feedback, suggestions for developments, and bug reports please use the dedicated `issues page <https://github.com/pySTEPS/pysteps/issues>`_.
For more information, please read our `contributors guidelines <https://pysteps.readthedocs.io/en/stable/developer_guide/contributors_guidelines.html>`_.
Reference publications
======================
The overall library is described in
Pulkkinen, S., D. Nerini, A. Perez Hortal, C. Velasco-Forero, U. Germann,
A. Seed, and L. Foresti, 2019: Pysteps: an open-source Python library for
probabilistic precipitation nowcasting (v1.0). *Geosci. Model Dev.*, **12 (10)**,
4185–4219, doi:`10.5194/gmd-12-4185-2019 <https://doi.org/10.5194/gmd-12-4185-2019>`_.
While the more recent blending module is described in
Imhoff, R.O., L. De Cruz, W. Dewettinck, C.C. Brauer, R. Uijlenhoet, K-J. van Heeringen,
C. Velasco-Forero, D. Nerini, M. Van Ginderachter, and A.H. Weerts, 2023:
Scale-dependent blending of ensemble rainfall nowcasts and NWP in the open-source
pysteps library. *Q J R Meteorol Soc.*, 1-30,
doi: `10.1002/qj.4461 <https://doi.org/10.1002/qj.4461>`_.
Contributors
============
.. image:: https://contrib.rocks/image?repo=pySTEPS/pysteps
:target: https://github.com/pySTEPS/pysteps/graphs/contributors
================================================
FILE: ci/ci_test_env.yml
================================================
# pysteps development environment
name: test_environment
channels:
- conda-forge
- defaults
dependencies:
- python>=3.11
- pip
- mamba
# Minimal dependencies
- numpy
- cython
- jsmin
- jsonschema
- matplotlib
- netCDF4
- opencv
- pillow
- pyproj
- scipy
# Optional dependencies
- dask
- pyfftw
- cartopy
- h5py
- PyWavelets
- pandas
- scikit-image
- scikit-learn
- rasterio
- gdal
# Test dependencies
- pytest
- pytest-cov
- pip:
- cookiecutter
================================================
FILE: ci/fetch_pysteps_data.py
================================================
# -*- coding: utf-8 -*-
"""
Install the pysteps example data into a test environment and write a
pystepsrc configuration file that points to that data.

The destination directory is read from the `PYSTEPS_DATA_PATH` environment
variable. After running this script, the `PYSTEPSRC` environment variable
should be set to PYSTEPSRC=$PYSTEPS_DATA_PATH/pystepsrc so that pysteps
picks up the generated configuration file.
"""
import os

from pysteps.datasets import create_default_pystepsrc, download_pysteps_data

data_dir = os.environ["PYSTEPS_DATA_PATH"]

# Force a fresh download even if the data is already present.
download_pysteps_data(data_dir, force=True)

# Write the pystepsrc file next to the downloaded data.
create_default_pystepsrc(data_dir, config_dir=data_dir, file_name="pystepsrc")
================================================
FILE: ci/test_plugin_support.py
================================================
# -*- coding: utf-8 -*-
"""
Smoke test for the pysteps importer-plugin discovery mechanism.

Assumes that a package generated from the default pysteps plugin template
(using the default values) is installed:
https://github.com/pySTEPS/cookiecutter-pysteps-plugin
"""
from pysteps import io

print("Testing plugin support: ", end="")

# The plugin's importer must be exposed both as an attribute of
# pysteps.io.importers and in the io.get_method registry.
assert hasattr(io.importers, "import_institution_name")
assert "institution_name" in io.interface._importer_methods

from pysteps.io.importers import import_institution_name

import_institution_name("filename")

print("PASSED")
================================================
FILE: doc/.gitignore
================================================
_build/
generated
auto_examples
================================================
FILE: doc/Makefile
================================================
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = pysteps
SOURCEDIR = source
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
# "help" and "Makefile" are not real files; always run their recipes.
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
================================================
FILE: doc/_static/pysteps.css
================================================
/* Style overrides for the pysteps Sphinx documentation. */

/* Underline top-level section headings with the pysteps accent blue. */
.section h1 {
    border-bottom: 2px solid #0099ff;
    display: inline-block;
}

/* Underline second-level headings with a lighter shade of the same blue. */
.section h2 {
    border-bottom: 2px solid #ccebff;
    display: inline-block;
}

/* override table width restrictions */
@media screen and (min-width: 767px) {
    .wy-table-responsive table td {
        /* !important prevents the common CSS stylesheets from overriding
           this as on RTD they are loaded after this stylesheet */
        white-space: normal !important;
    }
    .wy-table-responsive {
        overflow: visible !important;
    }
}
================================================
FILE: doc/_templates/layout.html
================================================
{# Extend the theme's default layout and register the custom pysteps stylesheet. #}
{% extends "!layout.html" %}
{% set css_files = css_files + ["_static/pysteps.css"] %}
================================================
FILE: doc/make.bat
================================================
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation

REM Allow the caller to override the sphinx-build executable via SPHINXBUILD.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
set SPHINXPROJ=pysteps

REM Without arguments, show the Sphinx help.
if "%1" == "" goto help

REM Probe for sphinx-build; exit code 9009 means it was not found on PATH.
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

REM Forward the requested target to Sphinx's "make mode".
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

:end
popd
================================================
FILE: doc/rebuild_docs.sh
================================================
# Build documentation from scratch.
# Remove previously generated API stubs and the example gallery; errors
# (e.g. the directories not existing) are silenced.
rm -r source/generated &> /dev/null
rm -r source/auto_examples &> /dev/null
# Let Sphinx rebuild everything from a clean state.
make clean
make html
================================================
FILE: doc/requirements.txt
================================================
# Additional requirements related to the documentation build only
sphinx
sphinxcontrib.bibtex
sphinx-book-theme
sphinx_gallery
scikit-image
scikit-learn
pandas
git+https://github.com/pySTEPS/pysteps-nwp-importers.git@main#egg=pysteps_nwp_importers
pygrib
h5py
================================================
FILE: doc/source/conf.py
================================================
# -*- coding: utf-8 -*-
# Sphinx configuration for the pysteps documentation.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
from datetime import datetime
import json
from jsmin import jsmin

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# On Read the Docs the package is installed, so the source tree is only added
# to sys.path for local builds.
if "READTHEDOCS" not in os.environ:
    sys.path.insert(1, os.path.abspath("../../"))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "1.6"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinxcontrib.bibtex",
    "sphinx_gallery.gen_gallery",
]

bibtex_bibfiles = ["references.bib"]

# numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "pysteps"
copyright = f"2018-{datetime.now():%Y}, pysteps developers"
author = "pysteps developers"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
def get_version():
    """Return the project version string from the 'git describe' command.

    Returns
    -------
    str
        The output of ``git describe --tags --always`` (with a trailing
        newline), or the placeholder "X.Y" when git is unavailable or the
        sources are not a git checkout (e.g. when building from an sdist).
    """
    from subprocess import CalledProcessError, check_output

    try:
        _version = check_output(["git", "describe", "--tags", "--always"])
    except (CalledProcessError, OSError):
        # git is not installed, or this is not a git repository: fall back
        # to a placeholder instead of crashing the documentation build.
        return "X.Y"
    if _version:
        return _version.decode("utf-8")
    return "X.Y"
# The short X.Y version (strip the leading "v" and any git-describe suffix).
version = get_version().lstrip("v").rstrip().split("-")[0]
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Read the Docs build --------------------------------------------------
def set_root():
    """Rewrite the pystepsrc data-source root paths as absolute locations
    under the local pysteps-data checkout and save the result to
    ../../pystepsrc.rtd (used by the Read the Docs build)."""
    src_path = os.path.abspath(os.path.join("..", "..", "pysteps", "pystepsrc"))
    with open(src_path, "r") as config_file:
        # pystepsrc is JSON with comments; strip them with jsmin before parsing.
        rcparams = json.loads(jsmin(config_file.read()))

    # Point every data source at the cloned pysteps-data directory.
    for source in rcparams["data_sources"].values():
        source["root_path"] = os.path.abspath(
            os.path.join("..", "..", "pysteps-data", source["root_path"])
        )

    dst_path = os.path.abspath(os.path.join("..", "..", "pystepsrc.rtd"))
    with open(dst_path, "w") as config_file:
        json.dump(rcparams, config_file, indent=4)
# On Read the Docs, clone the example data and point pysteps at a patched
# configuration file whose data paths are absolute.
if "READTHEDOCS" in os.environ:
    repourl = "https://github.com/pySTEPS/pysteps-data.git"
    # Renamed from "dir" to avoid shadowing the builtin of the same name.
    data_dir = os.path.abspath(os.path.join(os.getcwd(), "..", "..", "pysteps-data"))
    # Start from a clean checkout of the example data.
    subprocess.check_call(["rm", "-rf", data_dir])
    subprocess.check_call(["git", "clone", repourl, data_dir])
    os.environ["PYSTEPS_DATA_PATH"] = data_dir
    set_root()
    pystepsrc = os.path.abspath(os.path.join("..", "..", "pystepsrc.rtd"))
    os.environ["PYSTEPSRC"] = pystepsrc
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# html_theme = 'classic'
html_theme = "sphinx_book_theme"
html_title = ""

# Metadata used by the theme to build "edit on GitHub" style links.
html_context = {
    "github_user": "pySTEPS",
    "github_repo": "pysteps",
    "github_version": "master",
    "doc_path": "doc",
}

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "repository_url": "https://github.com/pySTEPS/pysteps",
    "repository_branch": "master",
    "path_to_docs": "doc/source",
    "use_edit_page_button": True,
    "use_repository_button": True,
    "use_issues_button": True,
}

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../_static/pysteps_logo.png"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["../_static"]
html_css_files = ["../_static/pysteps.css"]

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
#     "**": [
#         "relations.html",  # needs 'show_related': True theme option to display
#         "searchbox.html",
#     ]
# }

html_domain_indices = True
autosummary_generate = True

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "pystepsdoc"

# -- Options for LaTeX output ---------------------------------------------

# This hack is taken from numpy (https://github.com/numpy/numpy/blob/master/doc/source/conf.py).
latex_preamble = r"""
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
"""

latex_elements = {
    "papersize": "a4paper",
    "pointsize": "10pt",
    "preamble": latex_preamble,
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

latex_domain_indices = False

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "pysteps.tex", "pysteps reference", author, "manual"),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pysteps", "pysteps reference", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "pysteps",
        "pysteps reference",
        author,
        "pysteps",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# -- Options for Sphinx-Gallery -------------------------------------------

# The configuration dictionary for Sphinx-Gallery
sphinx_gallery_conf = {
    "examples_dirs": "../../examples",  # path to your example scripts
    "gallery_dirs": "auto_examples",  # path where to save gallery generated examples
    "filename_pattern": r"/*\.py",  # Include all the files in the examples dir
}
================================================
FILE: doc/source/developer_guide/build_the_docs.rst
================================================
.. _build_the_docs:
=================
Building the docs
=================
The pysteps documentation is built using
`Sphinx <http://www.sphinx-doc.org/en/master/>`_,
a tool that makes it easy to create intelligent and beautiful documentation.
The documentation is located in the **doc** folder in the pysteps repo.
Automatic build
---------------
The simplest way to build the documentation is using tox and the tox-conda
plugin (conda needed).
To install these packages activate your conda development environment and run::
conda install -c conda-forge tox tox-conda
Then, to build the documentation, from the repo's root run::
tox -e docs
This will create a conda environment with all the necessary dependencies and the
data needed to create the examples.
Manual build
------------
To build the docs you need to satisfy a few more dependencies
related to Sphinx that are specified in the doc/requirements.txt file:
- sphinx
- numpydoc
- sphinxcontrib.bibtex
- sphinx-book-theme
- sphinx_gallery
You can install these packages running `pip install -r doc/requirements.txt`.
In addition to these requirements, to build the example gallery in the
documentation the example pysteps-data is needed. To download and install this
data see the installation instructions in the :ref:`example_data` section.
Once these requirements are met, to build the documentation, in the **doc**
folder run::
make html
This will build the documentation along with the example gallery.
The build documentation (html web page) will be available in
**doc/_build/html/**.
To correctly visualize the documentation, you need to set up and run a local
HTTP server. To do that, in the **doc/_build/html/** directory run::
python -m http.server
This will set up a local HTTP server on 0.0.0.0 port 8000.
To see the built documentation open the following url in the browser:
http://0.0.0.0:8000/
================================================
FILE: doc/source/developer_guide/contributors_guidelines.rst
================================================
.. _contributor_guidelines:
.. include:: ../../../CONTRIBUTING.rst
================================================
FILE: doc/source/developer_guide/importer_plugins.rst
================================================
.. _importer-plugins:
===========================
Create your importer plugin
===========================
Since version 1.4, pysteps allows the users to add new importers by installing external
packages, called plugins, without modifying the pysteps installation. These plugins need
to follow a particular structure to allow pysteps to discover and integrate the new
importers to the pysteps interface without any user intervention.
.. contents:: Table of Contents
:local:
:depth: 3
How do the plugins work?
========================
When the plugin is installed, it advertises the new importers to other packages (in our
case, pysteps) using the python `entry points specification`_.
These new importers are automatically discovered every time that the pysteps library is
imported. The discovered importers are added as attributes to the io.importers module
and also registered to the io.get_method interface without any user intervention.
In addition, since the installation of the plugins does not modify the actual pysteps
installation (i.e., the pysteps sources), the pysteps library can be updated without
reinstalling the plugin.
.. _`entry points specification`: https://packaging.python.org/specifications/entry-points/
Create your plugin
==================
There are two ways of creating a plugin. The first one is building the importers plugin
from scratch. However, this can be a daunting task if you are creating your first plugin.
To facilitate the creation of new plugins, we provide a `Cookiecutter`_ template, in a
separate project, that creates a template project to be used as a starting point to build
the plugin.
The template for the pysteps plugins is maintained as a separate project at
`cookiecutter-pysteps-plugin <https://github.com/pySTEPS/cookiecutter-pysteps-plugin>`_.
For detailed instruction on how to create a plugin, `check the template's documentation`_.
.. _`check the template's documentation`: https://cookiecutter-pysteps-plugin.readthedocs.io/en/latest
.. _Cookiecutter: https://cookiecutter.readthedocs.io
================================================
FILE: doc/source/developer_guide/pypi.rst
================================================
.. _pypi_relase:
=============================
Packaging the pysteps project
=============================
The `Python Package Index <https://pypi.org/>`_ (PyPI) is a software
repository for the Python programming language. PyPI helps you find and
install software developed and shared by the Python community.
The following guide to package pysteps was adapted from the
`PyPI <https://packaging.python.org/tutorials/packaging-projects/#generating-distribution-archives>`_
official documentation.
Generating the source distribution
==================================
The first step is to generate a `source distribution
(sdist) <https://packaging.python.org/glossary/#term-source-distribution-or-sdist>`_
for the pysteps library. These are archives that are uploaded to the
`Package Index <https://pypi.org/>`_ and can be installed by pip.
To create the sdist package we need the **setuptools** package
installed.
Then, from the root folder of the pysteps source run::
python setup.py sdist
Once this command is completed, it should generate a tar.gz (source
archive) file in the **dist** directory::
dist/
pysteps-a.b.c.tar.gz
where a.b.c denote the version number.
Uploading the source distribution to the archive
================================================
The last step is to upload your package to the `Python Package
Index <https://pypi.org/>`_.
**Important**
Before we actually upload the distribution to the Python Index, we will
test it in `Test PyPI <https://test.pypi.org/>`_. Test PyPI is a
separate instance of the package index that allows us to try the
distribution without affecting the real index (PyPi). Because TestPyPI
has a separate database from the actual PyPI, you’ll need a separate
user account specifically for TestPyPI. You can register your
account in https://test.pypi.org/account/register/.
Once you are registered, you can use
`twine <https://twine.readthedocs.io/en/latest/#twine-user-documentation>`_
to upload the distribution packages. Alternatively, the package can be
uploaded manually from the **Test PyPI** page.
If Twine is not installed, you can install it by running
``pip install twine`` or ``conda install twine``.
Test PyPI
^^^^^^^^^
To upload the recently created source distribution
(**dist/pysteps-a.b.c.tar.gz**) under the **dist** directory run::
twine upload --repository-url https://test.pypi.org/legacy/ dist/pysteps-a.b.c.tar.gz
where a.b.c denote the version number.
You will be prompted for the username and password you registered with
Test PyPI. After the command completes, you should see output similar to
this::
Uploading distributions to https://test.pypi.org/legacy/
Enter your username: [your username]
Enter your password:
Uploading pysteps-a.b.c.tar.gz
100%|█████████████████████| 4.25k/4.25k [00:01<00:00, 3.05kB/s]
Once uploaded your package should be viewable on TestPyPI, for example,
https://test.pypi.org/project/pysteps
Test the uploaded package
-------------------------
Before uploading the package to the official `Python Package
Index <https://pypi.org/>`_, test that the package can be installed
using pip.
Automatic test
^^^^^^^^^^^^^^
The simplest way to test that the package can be installed using pip is using tox
and the tox-conda plugin (conda needed).
To install these packages activate your conda development environment and run::
conda install -c conda-forge tox tox-conda
Then, to test the installation in a minimal environment and in an environment
with all the dependencies (full env), run::
tox -r -e pypi_test # Test the installation in a minimal env
tox -r -e pypi_test_full  # Test the installation in a full env
Manual test
^^^^^^^^^^^
To manually test the installation on a new environment,
create a copy of the basic development environment using the
`environment_dev.yml <https://github.com/pySTEPS/pysteps/blob/master/environment_dev.yml>`_
file in the root folder of the pysteps project::
conda env create -f environment_dev.yml -n pysteps_test
Then we activate the environment::
source activate pysteps_test
or::
conda activate pysteps_test
If the environment pysteps_test was already created, remove any version of
pysteps already installed::
pip uninstall pysteps
Now, install the pysteps package from test.pypi.org.
Since not all the dependencies are available in the Test PyPI repository, we need to add the official repo as an extra index to pip. By doing so, pip will look first in the Test PyPI index and then in the official PyPI::
pip install --no-cache-dir --index-url https://test.pypi.org/simple/ --extra-index-url=https://pypi.org/simple/ pysteps
To test that the installation was successful, from a folder different
than the pysteps source, run::
pytest --pyargs pysteps
If any test didn't pass, check the sources or consider creating a new release
fixing those bugs.
Upload package to PyPi
----------------------
Once the
`sdist <https://packaging.python.org/glossary/#term-source-distribution-or-sdist>`_
package was tested, we can safely upload it to the Official PyPi
repository with::
twine upload dist/pysteps-a.b.c.tar.gz
Now, **pysteps** can be installed by simply running::
pip install pysteps
As an extra sanity measure, it is recommended to test the pysteps package
installed from the Official PyPi repository
(instead of the test PyPi).
Automatic test
^^^^^^^^^^^^^^
Similarly to the `Test the uploaded package`_ section, to test the
installation from PyPI in a clean environment, run::
tox -r -e pypi
Manual test
^^^^^^^^^^^
Follow test instructions in `Test PyPI`_ section.
================================================
FILE: doc/source/developer_guide/test_pysteps.rst
================================================
.. _testing_pysteps:
===============
Testing pysteps
===============
The pysteps distribution includes a small test suite for some of the
modules. To run the tests the `pytest <https://docs.pytest.org>`__
package is needed. To install it, in a terminal run::
pip install pytest
Automatic testing
=================
The simplest way to run the pysteps' test suite is using tox and the tox-conda
plugin (conda needed).
To install these packages activate your conda development environment and run::
conda install -c conda-forge tox tox-conda
Then, to run the tests, from the repo's root run::
tox # Run pytests
tox -e install # Test package installation
tox -e black # Test for black formatting warnings
Manual testing
==============
Example data
------------
The built-in tests require the pysteps example data to be installed.
See the installation instructions in the :ref:`example_data` section.
Test an installed package
-------------------------
After the package is installed, you can launch the test suite from any
directory by running::
pytest --pyargs pysteps
Test from sources
-----------------
Before testing the package directly from the sources, we need to build
the extensions in-place. To do that, from the root pysteps folder run::
python setup.py build_ext -i
Now, the package sources can be tested in-place using the **pytest**
command on the root of the pysteps source directory. E.g.::
pytest -v --tb=line
================================================
FILE: doc/source/developer_guide/update_conda_forge.rst
================================================
.. _update_conda_feedstock:
==========================================
Updating the conda-forge pysteps-feedstock
==========================================
.. _pysteps-feedstock: https://github.com/conda-forge/pysteps-feedstock
.. _`conda-forge/pysteps-feedstock`: https://github.com/conda-forge/pysteps-feedstock
Here we will describe the steps to update the pysteps conda-forge feedstock.
This tutorial is intended for the core developers listed as maintainers of the
conda recipe in the `conda-forge/pysteps-feedstock`_.
Examples for needing to update the pysteps-feedstock are:
* New release
* Fix pysteps package errors
**The following tutorial was adapted from the official conda-forge.org documentation, released
under CC4.0 license**
What is a “conda-forge”
=======================
Conda-forge is a community effort that provides conda packages for a wide range of software.
The conda team from Anaconda packages a multitude of packages and provides them to all users
free of charge in their default channel.
**conda-forge** is a community-led conda channel of installable packages that allows users to share software
that is not included in the official Anaconda repository. The main advantages of **conda-forge** are:
- all packages are shared in a single channel named conda-forge
- care is taken that all packages are up-to-date
- common standards ensure that all packages have compatible versions
- by default, packages are built for macOS, linux amd64 and windows amd64
In order to provide high-quality builds, the process has been automated into the conda-forge GitHub organization.
The conda-forge organization contains one repository for each of the installable packages.
Such a repository is known as a **feedstock**.
The actual pysteps feedstock is https://github.com/conda-forge/pysteps-feedstock
A feedstock is made up of a conda recipe (the instructions on what and how to build the package) and the
necessary configurations for automatic building using freely available continuous integration services.
See the official `conda-forge documentation <http://conda-forge.org/docs/user/00_intro.html>`_ for more details.
Maintain pysteps conda-forge package
====================================
Pysteps core developers act as maintainers of the pysteps feedstock.
All pysteps developers listed as maintainers of the pysteps feedstock are given push access to the feedstock repository.
This means that a maintainer can create branches in the main repository.
Every time that a new commit is pushed/merged in the feedstock repository, conda-forge runs a Continuous Integration (CI)
system that runs quality checks, builds the pysteps recipe on Windows, OSX, and Linux, and publishes the built recipes in
the conda-forge channel.
Important
---------
For updates, using a branch in the main repo and a subsequent Pull Request (PR) to the master branch is discouraged because:
- CI is run on both the branch and on the Pull Request (if any) associated with that branch. This wastes CI resources.
- Branches are automatically published by the CI system. This means that for every push, the packages will be published
  before the PR is actually merged.
For these reasons, to update the feedstock, the maintainers need to fork the feedstock, create a new branch in that
fork, push to that branch in the fork, and then open a PR to the conda-forge repo.
Workflow for updating a pysteps-feedstock
-----------------------------------------
The mandatory steps to update the pysteps-feedstock_ are:
1. Forking the pysteps-feedstock_.
* Clone the forked repository in your computer::
git clone https://github.com/<your-github-id>/pysteps-feedstock
#. Syncing your fork with the pysteps feedstock. This step is only needed if your local repository is not up to date
   with the pysteps-feedstock_. If you just cloned the forked pysteps-feedstock_, you can ignore this step.
* Make sure you are on the master branch::
git checkout master
* Register conda-forge’s feedstock with::
git remote add upstream https://github.com/conda-forge/pysteps-feedstock
* Fetch the latest updates with git fetch upstream::
git fetch upstream
* Pull in the latest changes into your master branch::
git rebase upstream/master
#. Create a new branch::
git checkout -b <branch-name>
#. Update the recipe and push changes in this new branch
* See next section "Updating recipes" for more details
* Push changes::
git commit -m <commit message>
#. Pushing your changes to GitHub::
git push origin <branch-name>
#. Propose a Pull Request
* Create a pull request via the web interface
Updating pysteps recipe
=======================
The pysteps-feedstock_ should be updated when:
* We release a new pysteps version
* Need to fix errors in the pysteps package
New release
-----------
When a new pysteps version is released, before updating the pysteps feedstock, the new version needs to be uploaded
to the Python Package Index (PyPI) (see :ref:`pypi_relase` for more details).
This step is needed because the conda recipe uses the PyPI to build the pysteps conda package.
Once the new version is available in the PyPI, the conda recipe in pysteps-feedstock/recipe/meta.yaml
needs to be updated by:
1. Updating version and hash
#. Checking the dependencies
#. Bumping the build number
- When the package version changes, reset the build number back to 0.
- The build number is increased when the source code for the package has
not changed but you need to make a new build.
- In case that the recipe must be updated, increase by 1 the
**build_number** in the conda recipe in
`pysteps-feedstock/recipe/meta.yaml <https://github.com/conda-forge/pysteps-feedstock/blob/master/recipe/meta.yaml>`_.
Some examples for needing to increase the build number are:
- updating the pinned dependencies
- Fixing wrong dependencies
#. Rerendering feedstocks
- Rerendering is conda-forge’s way to update the files common to
all feedstocks (e.g. README, CI configuration, pinned dependencies).
- When to rerender:
  We need to re-render when there are changes in the following parts of the
  feedstock:
- the platform configuration (skip sections)
- the yum_requirements.txt
- updates in the build matrix due to new versions of Python, NumPy,
PERL, R, etc.
- updates in conda-forge pinning that affect the feedstock
- build issues that a feedstock configuration update will fix
- To rerender the feedstock, the first step is to install **conda-smithy**
in your root environment::
conda install -c conda-forge conda-smithy
- Commit all changes and from the root directory of the feedstock, type::
conda smithy rerender -c auto
Optionally one can commit the changes manually.
To do this drop *-c auto* from the command.
More information on https://conda-forge.org/docs/maintainer/updating_pkgs.html#dev-rerender-local
conda-forge autotick bot
------------------------
The conda-forge autotick bot is now a central part of the conda-forge
ecosystem.
The conda-forge autotick bot was created to track out-of-date feedstocks and
issue pull requests with updated recipes.
The bot tracks and updates out-of-date feedstocks in four steps:
- Find the names of all feedstocks on conda-forge.
- Compute the dependency graph of packages on conda-forge found in step 1.
- Find the most recent version of each feedstock’s source code.
- Open a PR into each out-of-date feedstock updating the meta.yaml for the most recent upstream release.
These steps are run automatically every six hours.
Hence, when a new pysteps version is uploaded to PyPI, this bot will
automatically update the recipe and submit a PR.
If the tests in the PR pass, then it can be merged into the
feedstock's master branch.
================================================
FILE: doc/source/index.rst
================================================
pysteps -- The nowcasting initiative
====================================
Pysteps is a community-driven initiative for developing and maintaining an easy
to use, modular, free and open source Python framework for short-term ensemble
prediction systems.
The focus is on probabilistic nowcasting of radar precipitation fields,
but pysteps is designed to allow a wider range of uses.
Pysteps is actively developed on GitHub__, while a more thorough description
of pysteps is available in the pysteps reference publications:
.. note::
Pulkkinen, S., D. Nerini, A. Perez Hortal, C. Velasco-Forero, U. Germann,
A. Seed, and L. Foresti, 2019: Pysteps: an open-source Python library for
probabilistic precipitation nowcasting (v1.0). *Geosci. Model Dev.*, **12 (10)**,
4185–4219, doi:`10.5194/gmd-12-4185-2019 <https://doi.org/10.5194/gmd-12-4185-2019>`_.
Imhoff, R.O., L. De Cruz, W. Dewettinck, C.C. Brauer, R. Uijlenhoet, K-J. van Heeringen,
C. Velasco-Forero, D. Nerini, M. Van Ginderachter, and A.H. Weerts, 2023:
Scale-dependent blending of ensemble rainfall nowcasts and NWP in the open-source
pysteps library. *Q J R Meteorol Soc.*, 1-30,
doi: `doi:10.1002/qj.4461 <https://doi.org/10.1002/qj.4461>`_.
__ https://github.com/pySTEPS/pysteps
.. toctree::
:maxdepth: 1
:hidden:
:caption: For users
Installation <user_guide/install_pysteps>
Gallery <../auto_examples/index>
My first nowcast (Colab Notebook) <https://colab.research.google.com/github/pySTEPS/pysteps/blob/master/examples/my_first_nowcast.ipynb>
API Reference <pysteps_reference/index>
Example data <user_guide/example_data>
Configuration file (pystepsrc) <user_guide/set_pystepsrc>
Machine learning applications <user_guide/machine_learning_pysteps>
Bibliography <zz_bibliography>
.. toctree::
:maxdepth: 1
:hidden:
:caption: For developers
Contributing Guide <developer_guide/contributors_guidelines>
Importer plugins <developer_guide/importer_plugins>
Testing <developer_guide/test_pysteps>
Building the docs <developer_guide/build_the_docs>
Packaging <developer_guide/pypi>
Publishing to conda-forge <developer_guide/update_conda_forge>
GitHub repository <https://github.com/pySTEPS/pysteps>
================================================
FILE: doc/source/pysteps_reference/blending.rst
================================================
================
pysteps.blending
================
Implementation of blending methods for blending (ensemble) nowcasts with Numerical Weather Prediction (NWP) models.
.. automodule:: pysteps.blending.interface
.. automodule:: pysteps.blending.clim
.. automodule:: pysteps.blending.ens_kalman_filter_methods
.. automodule:: pysteps.blending.linear_blending
.. automodule:: pysteps.blending.pca_ens_kalman_filter
.. automodule:: pysteps.blending.skill_scores
.. automodule:: pysteps.blending.steps
.. automodule:: pysteps.blending.utils
================================================
FILE: doc/source/pysteps_reference/cascade.rst
================================================
===============
pysteps.cascade
===============
Methods for constructing bandpass filters and decomposing 2d precipitation
fields into different spatial scales.
.. automodule:: pysteps.cascade.interface
.. automodule:: pysteps.cascade.bandpass_filters
.. automodule:: pysteps.cascade.decomposition
================================================
FILE: doc/source/pysteps_reference/datasets.rst
================================================
.. automodule:: pysteps.datasets
================================================
FILE: doc/source/pysteps_reference/decorators.rst
================================================
.. automodule:: pysteps.decorators
================================================
FILE: doc/source/pysteps_reference/downscaling.rst
================================================
===================
pysteps.downscaling
===================
Implementation of deterministic and ensemble downscaling methods.
.. automodule:: pysteps.downscaling.interface
.. automodule:: pysteps.downscaling.rainfarm
================================================
FILE: doc/source/pysteps_reference/extrapolation.rst
================================================
=====================
pysteps.extrapolation
=====================
Extrapolation module functions and interfaces.
.. automodule:: pysteps.extrapolation.interface
.. automodule:: pysteps.extrapolation.semilagrangian
================================================
FILE: doc/source/pysteps_reference/feature.rst
================================================
===============
pysteps.feature
===============
Implementations of feature detection methods.
.. automodule:: pysteps.feature.interface
.. automodule:: pysteps.feature.blob
.. automodule:: pysteps.feature.tstorm
.. automodule:: pysteps.feature.shitomasi
================================================
FILE: doc/source/pysteps_reference/index.rst
================================================
.. _pysteps-reference:
API Reference
=============
:Release: |version|
:Date: |today|
This page gives a comprehensive description of all the modules and functions
available in pysteps.
.. toctree::
:maxdepth: 2
:caption: API Reference
pysteps
blending
cascade
decorators
extrapolation
datasets
downscaling
feature
io
motion
noise
nowcasts
postprocessing
timeseries
tracking
utils
verification
visualization
.. only:: html
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. only:: html
Bibliography
------------
* :ref:`bibliography`
================================================
FILE: doc/source/pysteps_reference/io.rst
================================================
==========
pysteps.io
==========
Methods for browsing data archives, reading 2d precipitation fields and writing
forecasts into files.
.. automodule:: pysteps.io.interface
.. automodule:: pysteps.io.archive
.. automodule:: pysteps.io.importers
.. automodule:: pysteps.io.nowcast_importers
.. automodule:: pysteps.io.exporters
.. automodule:: pysteps.io.readers
================================================
FILE: doc/source/pysteps_reference/motion.rst
================================================
==============
pysteps.motion
==============
Implementations of optical flow methods.
.. automodule:: pysteps.motion.interface
.. automodule:: pysteps.motion.constant
.. automodule:: pysteps.motion.darts
.. automodule:: pysteps.motion.lucaskanade
.. automodule:: pysteps.motion.proesmans
.. automodule:: pysteps.motion.vet
================================================
FILE: doc/source/pysteps_reference/noise.rst
================================================
=============
pysteps.noise
=============
Implementation of methods for generating stochastic perturbations of precipitation and motion fields.
.. automodule:: pysteps.noise.interface
.. automodule:: pysteps.noise.fftgenerators
.. automodule:: pysteps.noise.motion
.. automodule:: pysteps.noise.utils
================================================
FILE: doc/source/pysteps_reference/nowcasts.rst
================================================
================
pysteps.nowcasts
================
Implementation of deterministic and ensemble nowcasting methods.
.. automodule:: pysteps.nowcasts.interface
.. automodule:: pysteps.nowcasts.anvil
.. automodule:: pysteps.nowcasts.extrapolation
.. automodule:: pysteps.nowcasts.linda
.. automodule:: pysteps.nowcasts.lagrangian_probability
.. automodule:: pysteps.nowcasts.sprog
.. automodule:: pysteps.nowcasts.sseps
.. automodule:: pysteps.nowcasts.steps
.. automodule:: pysteps.nowcasts.utils
================================================
FILE: doc/source/pysteps_reference/postprocessing.rst
================================================
======================
pysteps.postprocessing
======================
Methods for post-processing of forecasts.
.. automodule:: pysteps.postprocessing.ensemblestats
.. automodule:: pysteps.postprocessing.probmatching
================================================
FILE: doc/source/pysteps_reference/pysteps.rst
================================================
=======
pysteps
=======
Pysteps top-level module utilities
.. autosummary::
:toctree: ../generated/
pysteps.load_config_file
================================================
FILE: doc/source/pysteps_reference/timeseries.rst
================================================
==================
pysteps.timeseries
==================
Methods and models for time series analysis.
.. automodule:: pysteps.timeseries.autoregression
.. automodule:: pysteps.timeseries.correlation
================================================
FILE: doc/source/pysteps_reference/tracking.rst
================================================
================
pysteps.tracking
================
Implementations of feature tracking methods.
.. automodule:: pysteps.tracking.interface
.. automodule:: pysteps.tracking.lucaskanade
.. automodule:: pysteps.tracking.tdating
================================================
FILE: doc/source/pysteps_reference/utils.rst
================================================
=============
pysteps.utils
=============
Implementation of miscellaneous utility functions.
.. automodule:: pysteps.utils.interface
.. automodule:: pysteps.utils.arrays
.. automodule:: pysteps.utils.cleansing
.. automodule:: pysteps.utils.conversion
.. automodule:: pysteps.utils.dimension
.. automodule:: pysteps.utils.fft
.. automodule:: pysteps.utils.images
.. automodule:: pysteps.utils.interpolate
.. automodule:: pysteps.utils.pca
.. automodule:: pysteps.utils.reprojection
.. automodule:: pysteps.utils.spectral
.. automodule:: pysteps.utils.tapering
.. automodule:: pysteps.utils.transformation
================================================
FILE: doc/source/pysteps_reference/verification.rst
================================================
====================
pysteps.verification
====================
Methods for verification of deterministic, probabilistic and ensemble forecasts.
.. automodule:: pysteps.verification.interface
.. automodule:: pysteps.verification.detcatscores
.. automodule:: pysteps.verification.detcontscores
.. automodule:: pysteps.verification.ensscores
.. automodule:: pysteps.verification.lifetime
.. automodule:: pysteps.verification.plots
.. automodule:: pysteps.verification.probscores
.. automodule:: pysteps.verification.salscores
.. automodule:: pysteps.verification.spatialscores
================================================
FILE: doc/source/pysteps_reference/visualization.rst
================================================
=====================
pysteps.visualization
=====================
Methods for plotting precipitation and motion fields.
.. automodule:: pysteps.visualization.animations
.. automodule:: pysteps.visualization.basemaps
.. automodule:: pysteps.visualization.motionfields
.. automodule:: pysteps.visualization.precipfields
.. automodule:: pysteps.visualization.spectral
.. automodule:: pysteps.visualization.thunderstorms
.. automodule:: pysteps.visualization.utils
================================================
FILE: doc/source/references.bib
================================================
@TECHREPORT{BPS2004,
AUTHOR = "N. E. Bowler and C. E. Pierce and A. W. Seed",
TITLE = "{STEPS}: A probabilistic precipitation forecasting scheme which merges an extrapolation nowcast with downscaled {NWP}",
INSTITUTION = "UK Met Office",
TYPE = "Forecasting Research Technical Report",
NUMBER = 433,
ADDRESS = "Wallingford, United Kingdom",
YEAR = 2004,
}
@ARTICLE{BPS2006,
AUTHOR = "N. E. Bowler and C. E. Pierce and A. W. Seed",
TITLE = "{STEPS}: A probabilistic precipitation forecasting scheme which merges an extrapolation nowcast with downscaled {NWP}",
JOURNAL = "Quarterly Journal of the Royal Meteorological Society",
VOLUME = 132,
NUMBER = 620,
PAGES = "2127--2155",
YEAR = 2006,
DOI = "10.1256/qj.04.100"
}
@ARTICLE{BS2007,
AUTHOR = "J. Br{\"o}cker and L. A. Smith",
TITLE = "Increasing the Reliability of Reliability Diagrams",
JOURNAL = "Weather and Forecasting",
VOLUME = 22,
NUMBER = 3,
PAGES = "651--661",
YEAR = 2007,
DOI = "10.1175/WAF993.1"
}
@BOOK{CP2002,
AUTHOR = "A. Clothier and G. Pegram",
TITLE = "Space-time modelling of rainfall using the string of beads model: integration of radar and raingauge data",
SERIES = "WRC Report No. 1010/1/02",
PUBLISHER = "Water Research Commission",
ADDRESS = "Durban, South Africa",
YEAR = 2002
}
@ARTICLE{CRS2004,
AUTHOR = "B. Casati and G. Ross and D. B. Stephenson",
TITLE = "A New Intensity-Scale Approach for the Verification of Spatial Precipitation Forecasts",
VOLUME = 11,
NUMBER = 2,
JOURNAL = "Meteorological Applications",
PAGES = "141--154",
YEAR = 2004,
DOI = "10.1017/S1350482704001239"
}
@ARTICLE{DOnofrio2014,
TITLE = "Stochastic rainfall downscaling of climate models",
AUTHOR = "D'Onofrio, D and Palazzi, E and von Hardenberg, J and Provenzale, A and Calmanti, S",
JOURNAL = "J. Hydrometeorol.",
PUBLISHER = "American Meteorological Society",
VOLUME = 15,
NUMBER = 2,
PAGES = "830--843",
YEAR = 2014,
}
@ARTICLE{EWWM2013,
AUTHOR = "E. Ebert and L. Wilson and A. Weigel and M. Mittermaier and P. Nurmi and P. Gill and M. Göber and S. Joslyn and B. Brown and T. Fowler and A. Watkins",
TITLE = "Progress and challenges in forecast verification",
JOURNAL = "Meteorological Applications",
VOLUME = 20,
NUMBER = 2,
PAGES = "130--139",
YEAR = 2013,
DOI = "10.1002/met.1392"
}
@ARTICLE{Feldmann2021,
AUTHOR = "M. Feldmann and U. Germann and M. Gabella and A. Berne",
TITLE = "A Characterisation of Alpine Mesocyclone Occurrence",
JOURNAL = "Weather and Climate Dynamics Discussions",
PAGES = "1--26",
URL = "https://wcd.copernicus.org/preprints/wcd-2021-53/",
DOI = "10.5194/wcd-2021-53",
YEAR = 2021
}
@ARTICLE{FSNBG2019,
AUTHOR = "Foresti, L. and Sideris, I.V. and Nerini, D. and Beusch, L. and Germann, U.",
TITLE = "Using a 10-Year Radar Archive for Nowcasting Precipitation Growth and Decay: A Probabilistic Machine Learning Approach",
JOURNAL = "Weather and Forecasting",
VOLUME = 34,
PAGES = "1547--1569",
YEAR = 2019,
DOI = "10.1175/WAF-D-18-0206.1"
}
@ARTICLE{FNPC2020,
AUTHOR = "Franch, G. and Nerini, D. and Pendesini, M. and Coviello, L. and Jurman, G. and Furlanello, C.",
TITLE = "Precipitation Nowcasting with Orographic Enhanced Stacked Generalization: Improving Deep Learning Predictions on Extreme Events",
JOURNAL = "Atmosphere",
VOLUME = 11,
NUMBER = 3,
PAGES = "267",
YEAR = 2020,
DOI = "10.3390/atmos11030267"
}
@ARTICLE{FW2005,
AUTHOR = "N. I. Fox and C. K. Wikle",
TITLE = "A Bayesian Quantitative Precipitation Nowcast Scheme",
JOURNAL = "Weather and Forecasting",
VOLUME = 20,
NUMBER = 3,
PAGES = "264--275",
YEAR = 2005
}
@ARTICLE{GZ2002,
AUTHOR = "U. Germann and I. Zawadzki",
TITLE = "Scale-Dependence of the Predictability of Precipitation from Continental Radar Images. {P}art {I}: Description of the Methodology",
JOURNAL = "Monthly Weather Review",
VOLUME = 130,
NUMBER = 12,
PAGES = "2859--2873",
YEAR = 2002,
DOI = "10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2"
}
@ARTICLE{GZ2004,
AUTHOR = "U. Germann and I. Zawadzki",
TITLE = "Scale-Dependence of the Predictability of Precipitation from Continental Radar Images. {P}art {II}: Probability Forecasts",
JOURNAL = "Journal of Applied Meteorology",
VOLUME = 43,
NUMBER = 1,
PAGES = "74--89",
YEAR = 2004,
DOI = "10.1175/1520-0450(2004)043<0074:SDOTPO>2.0.CO;2"
}
@ARTICLE{Her2000,
AUTHOR = "H. Hersbach",
TITLE = "Decomposition of the Continuous Ranked Probability Score for Ensemble Prediction Systems",
JOURNAL = "Weather and Forecasting",
VOLUME = 15,
NUMBER = 5,
PAGES = "559--570",
YEAR = 2000,
DOI = "10.1175/1520-0434(2000)015<0559:DOTCRP>2.0.CO;2"
}
@article{Hwang2015,
AUTHOR = "Hwang, Yunsung and Clark, Adam J and Lakshmanan, Valliappa and Koch, Steven E",
TITLE = "Improved nowcasts by blending extrapolation and model forecasts",
JOURNAL = "Weather and Forecasting",
VOLUME = 30,
NUMBER = 5,
PAGES = "1201--1217",
YEAR = 2015,
DOI = "10.1175/WAF-D-15-0057.1"
}
@ARTICLE{LZ1995,
AUTHOR = "S. Laroche and I. Zawadzki",
TITLE = "Retrievals of Horizontal Winds from Single-Doppler Clear-Air Data by Methods of Cross Correlation and Variational Analysis",
JOURNAL = "Journal of Atmospheric and Oceanic Technology",
VOLUME = 12,
NUMBER = 4,
PAGES = "721--738",
YEAR = 1995,
DOI = "10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2",
}
@ARTICLE{NBSG2017,
AUTHOR = "D. Nerini and N. Besic and I. Sideris and U. Germann and L. Foresti",
TITLE = "A non-stationary stochastic ensemble generator for radar rainfall fields based on the short-space {F}ourier transform",
JOURNAL = "Hydrology and Earth System Sciences",
VOLUME = 21,
NUMBER = 6,
YEAR = 2017,
PAGES = "2777--2797",
DOI = "10.5194/hess-21-2777-2017"
}
@ARTICLE{PCH2018,
AUTHOR = "S. Pulkkinen and V. Chandrasekar and A.-M. Harri",
TITLE = "Nowcasting of Precipitation in the High-Resolution {D}allas-{F}ort {W}orth ({DFW}) Urban Radar Remote Sensing Network",
JOURNAL = "IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing",
VOLUME = 11,
NUMBER = 8,
PAGES = "2773--2787",
YEAR = 2018,
DOI = "10.1109/JSTARS.2018.2840491"
}
@ARTICLE{PCH2019a,
AUTHOR = "S. Pulkkinen and V. Chandrasekar and A.-M. Harri",
TITLE = "Fully Spectral Method for Radar-Based Precipitation Nowcasting",
JOURNAL = "IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing",
VOLUME = 12,
NUMBER = 5,
PAGES = "1369--1382",
YEAR = 2019
}
@ARTICLE{PCH2019b,
AUTHOR = "S. Pulkkinen and V. Chandrasekar and A.-M. Harri",
TITLE = "Stochastic Spectral Method for Radar-Based Probabilistic Precipitation Nowcasting",
JOURNAL = "Journal of Atmospheric and Oceanic Technology",
VOLUME = 36,
NUMBER = 6,
PAGES = "971--985",
YEAR = 2019
}
@ARTICLE{PCLH2020,
AUTHOR = "S. Pulkkinen and V. Chandrasekar and A. von Lerber and A.-M. Harri",
TITLE = "Nowcasting of Convective Rainfall Using Volumetric Radar Observations",
JOURNAL = "IEEE Transactions on Geoscience and Remote Sensing",
DOI = "10.1109/TGRS.2020.2984594",
PAGES = "1--15",
YEAR = 2020
}
@ARTICLE{PCN2021,
AUTHOR = "S. Pulkkinen and V. Chandrasekar and T. Niemi",
TITLE = "Lagrangian Integro-Difference Equation Model for Precipitation Nowcasting",
JOURNAL = "Journal of Atmospheric and Oceanic Technology",
NOTE = "submitted",
YEAR = 2021
}
@INCOLLECTION{PGPO1994,
AUTHOR = "M. Proesmans and L. van Gool and E. Pauwels and A. Oosterlinck",
TITLE = "Determination of optical flow and its discontinuities using non-linear diffusion",
BOOKTITLE = "Computer Vision — ECCV '94",
VOLUME = 801,
SERIES = "Lecture Notes in Computer Science",
EDITOR = "J.-O. Eklundh",
PUBLISHER = "Springer Berlin Heidelberg",
PAGES = "294--304",
YEAR = 1994
}
@ARTICLE{RC2011,
AUTHOR = "E. Ruzanski and V. Chandrasekar",
JOURNAL = "IEEE Transactions on Geoscience and Remote Sensing",
TITLE = "Scale Filtering for Improved Nowcasting Performance in a High-Resolution {X}-Band Radar Network",
VOLUME = 49,
NUMBER = 6,
PAGES="2296--2307",
MONTH = "June",
YEAR=2011
}
@ARTICLE{Ravuri2021,
AUTHOR = "Ravuri, Suman and Lenc, Karel and Willson, Matthew and Kangin, Dmitry and Lam, Remi and Mirowski, Piotr and Fitzsimons, Megan and Athanassiadou, Maria and Kashem, Sheleem and Madge, Sam and Prudden, Rachel and Mandhane, Amol and Clark, Aidan and Brock, Andrew and Simonyan, Karen and Hadsell, Raia and Robinson, Niall and Clancy, Ellen and Arribas, Alberto and Mohamed, Shakir",
JOURNAL = "Nature",
TITLE = "Skilful precipitation nowcasting using deep generative models of radar",
VOLUME = 597,
NUMBER = 7878,
PAGES = "672--677",
YEAR = 2021,
DOI = "10.1038/s41586-021-03854-z",
}
@ARTICLE{RCW2011,
AUTHOR = "E. Ruzanski and V. Chandrasekar and Y. Wang",
TITLE = "The {CASA} Nowcasting System",
JOURNAL = "Journal of Atmospheric and Oceanic Technology",
VOLUME = 28,
NUMBER = 5,
PAGES = "640--655",
YEAR = 2011,
DOI = "10.1175/2011JTECHA1496.1"
}
@ARTICLE{RL2008,
AUTHOR = "N. M. Roberts and H. W. Lean",
TITLE = "Scale-Selective Verification of Rainfall Accumulations from High-Resolution Forecasts of Convective Events",
JOURNAL = "Monthly Weather Review",
VOLUME = 136,
NUMBER = 1,
PAGES = "78--97",
YEAR = 2008,
DOI = "10.1175/2007MWR2123.1"
}
@ARTICLE{Rebora2006,
AUTHOR = "N. Rebora and L. Ferraris and J. von Hardenberg and A. Provenzale",
TITLE = "RainFARM: Rainfall Downscaling by a Filtered Autoregressive Model",
JOURNAL = "Journal of Hydrometeorology",
VOLUME = 7,
NUMBER = 4,
PAGES = "724-738",
YEAR = 2006,
DOI = "10.1175/JHM517.1"
}
@ARTICLE{Seed2003,
AUTHOR = "A. W. Seed",
TITLE = "A Dynamic and Spatial Scaling Approach to Advection Forecasting",
JOURNAL = "Journal of Applied Meteorology",
VOLUME = 42,
NUMBER = 3,
PAGES = "381-388",
YEAR = 2003,
DOI = "10.1175/1520-0450(2003)042<0381:ADASSA>2.0.CO;2"
}
@ARTICLE{SPN2013,
AUTHOR = "A. W. Seed and C. E. Pierce and K. Norman",
TITLE = "Formulation and evaluation of a scale decomposition-based stochastic precipitation nowcast scheme",
JOURNAL = "Water Resources Research",
VOLUME = 49,
NUMBER = 10,
PAGES = "6624--6641",
YEAR = 2013,
DOI = "10.1002/wrcr.20536"
}
@Article{Terzago2018,
AUTHOR = "Terzago, S. and Palazzi, E. and von Hardenberg, J.",
TITLE = "Stochastic downscaling of precipitation in complex orography: a simple method to reproduce a realistic fine-scale climatology",
JOURNAL = "Natural Hazards and Earth System Sciences",
VOLUME = 18,
YEAR = 2018,
NUMBER = 11,
PAGES = "2825--2840",
DOI = "10.5194/nhess-18-2825-2018"
}
@ARTICLE{TRT2004,
AUTHOR = "A. M. Hering and C. Morel and G. Galli and P. Ambrosetti and M. Boscacci",
TITLE = "Nowcasting thunderstorms in the Alpine Region using a radar based adaptive thresholding scheme",
JOURNAL = "Proceedings of ERAD Conference 2004",
NUMBER = "January",
PAGES = "206--211",
YEAR = 2004
}
@ARTICLE{WHZ2009,
AUTHOR = "Heini Wernli and Christiane Hofmann and Matthias Zimmer",
TITLE = "Spatial Forecast Verification Methods Intercomparison Project: Application of the SAL Technique",
JOURNAL = "Weather and Forecasting",
NUMBER = "6",
VOLUME = "24",
PAGES = "1472--1484",
YEAR = 2009
}
@ARTICLE{WPHF2008,
AUTHOR = "Heini Wernli and Marcus Paulat and Martin Hagen and Christoph Frei",
TITLE = "SAL—A Novel Quality Measure for the Verification of Quantitative Precipitation Forecasts",
JOURNAL = "Monthly Weather Review",
NUMBER = "11",
VOLUME = "136",
PAGES = "4470 - 4487",
YEAR = 2008
}
@ARTICLE{XWF2005,
AUTHOR = "K. Xu and C. K Wikle and N. I. Fox",
TITLE = "A Kernel-Based Spatio-Temporal Dynamical Model for Nowcasting Weather Radar Reflectivities",
JOURNAL = "Journal of the American Statistical Association",
VOLUME = 100,
NUMBER = 472,
PAGES = "1133--1144",
YEAR = 2005
}
@ARTICLE{ZR2009,
AUTHOR = "P. Zacharov and D. Rezacova",
TITLE = "Using the fractions skill score to assess the relationship between an ensemble {QPF} spread and skill",
JOURNAL = "Atmospheric Research",
VOLUME = 94,
NUMBER = 4,
PAGES = "684--693",
YEAR = 2009,
DOI = "10.1016/j.atmosres.2009.03.004"
}
@ARTICLE{Imhoff2023,
AUTHOR = "R.O. Imhoff and L. De Cruz and W. Dewettinck and C.C. Brauer and R. Uijlenhoet and K-J. van Heeringen and C. Velasco-Forero and D. Nerini and M. Van Ginderachter and A.H. Weerts",
TITLE = "Scale-dependent blending of ensemble rainfall nowcasts and {NWP} in the open-source pysteps library",
JOURNAL = "Quarterly Journal of the Royal Meteorological Society",
VOLUME = 149,
NUMBER = 753,
PAGES = "1--30",
YEAR = 2023,
DOI = "10.1002/qj.4461"
}
@ARTICLE{Nerini2019MWR,
title = {A {Reduced}-{Space} {Ensemble} {Kalman} {Filter} {Approach} for {Flow}-{Dependent} {Integration} of {Radar} {Extrapolation} {Nowcasts} and {NWP} {Precipitation} {Ensembles}},
volume = {147},
doi = {10.1175/MWR-D-18-0258.1},
number = {3},
journal = {Monthly Weather Review},
author = {D. Nerini and L. Foresti and D. Leuenberger and S. Robert and U. Germann},
year = {2019},
pages = {987--1006},
}
================================================
FILE: doc/source/user_guide/example_data.rst
================================================
.. _example_data:
Installing the example data
===========================
The example scripts in the user guide, as well as the built-in tests,
use the example radar data available in a separate repository:
`pysteps-data <https://github.com/pySTEPS/pysteps-data>`_.
The easiest way to install the example data is by using the
:func:`~pysteps.datasets.download_pysteps_data` and
:func:`~pysteps.datasets.create_default_pystepsrc` functions from
the :mod:`pysteps.datasets` module.
Installation using the datasets module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Below is a code snippet that can be used to install the example data and configure the
`pystepsrc` file to point to it.
In the example below, the example data is placed in the user's home folder under the
**pysteps_data** directory. It also creates a default configuration file that points to
the downloaded data and places it in the $HOME/.pysteps (Unix and Mac OS X) or
$USERPROFILE/pysteps (Windows). This is one of the default locations where pysteps
looks for the configuration file (see :ref:`pysteps_lookup` for
more information).
.. code-block:: python
import os
# Import the helper functions
from pysteps.datasets import download_pysteps_data, create_default_pystepsrc
# In this example we will place it in the user's home folder on the
# `pysteps_data` folder.
home_dir = os.path.expanduser("~")
pysteps_data_dir_path = os.path.join(home_dir, "pysteps_data")
# Download the pysteps data.
download_pysteps_data(pysteps_data_dir_path, force=True)
# Create a default configuration file that points to the downloaded data.
# By default it will place the configuration file in the
# $HOME/.pysteps (unix and Mac OS X) or $USERPROFILE/pysteps (windows).
config_file_path = create_default_pystepsrc(pysteps_data_dir_path)
Note that for these changes to take effect you need to restart the python interpreter or
use the :func:`pysteps.load_config_file` function as follows::
# Load the new configuration file and replace the default configuration
import pysteps
pysteps.load_config_file(config_file_path, verbose=True)
To customize the default configuration file see the :ref:`pystepsrc` section.
Manual installation
~~~~~~~~~~~~~~~~~~~
Another alternative is to download the data manually into your computer and configure the
:ref:`pystepsrc <pystepsrc>` file to point to that example data.
First, download the data from the repository by
`clicking here <https://github.com/pySTEPS/pysteps-data/archive/master.zip>`_.
Unzip the data into a folder of your preference. Once the data is unzipped, the
directory structure looks like this::
pysteps-data
|
├── radar
├── KNMI
├── OPERA
├── bom
├── dwd
├── fmi
├── mch
The next step is updating the *pystepsrc* file to point to these directories,
as described in the :ref:`pystepsrc` section.
================================================
FILE: doc/source/user_guide/install_pysteps.rst
================================================
.. _install_pysteps:
Installing pysteps
==================
Dependencies
------------
The pysteps package needs the following dependencies
* `python >=3.11, <3.14 <http://www.python.org/>`_ (lower or higher versions may work but are not tested).
* `jsonschema <https://pypi.org/project/jsonschema/>`_
* `matplotlib <http://matplotlib.org/>`_
* `netCDF4 <https://pypi.org/project/netCDF4/>`_
* `numpy <http://www.numpy.org/>`_
* `opencv <https://opencv.org/>`_
* `pillow <https://python-pillow.org/>`_
* `pyproj <https://jswhit.github.io/pyproj/>`_
* `scipy <https://www.scipy.org/>`_
Additionally, the following packages can be installed for better computational
efficiency:
* `dask <https://dask.org/>`_ and
`toolz <https://github.com/pytoolz/toolz/>`_ (for code parallelization)
* `pyfftw <https://hgomersall.github.io/pyFFTW/>`_ (for faster FFT computation)
Other optional dependencies include:
* `cartopy >=0.18 <https://scitools.org.uk/cartopy/docs/latest/>`_ (for geo-referenced
visualization)
* `h5py <https://www.h5py.org/>`_ (for importing HDF5 data)
* `pygrib <https://jswhit.github.io/pygrib/docs/index.html>`_ (for importing MRMS data)
* `gdal <https://gdal.org/>`_ (for importing GeoTIFF data)
* `pywavelets <https://pywavelets.readthedocs.io/en/latest/>`_
(for intensity-scale verification)
* `pandas <https://pandas.pydata.org/>`_ and
`scikit-image >=0.19 <https://scikit-image.org/>`_ (for advanced feature detection methods)
* `rasterio <https://rasterio.readthedocs.io/en/latest/>`_ (for the reprojection module)
* `scikit-learn >=1.7 <https://scikit-learn.org/>`_ (for PCA-based blending methods)
**Important**: If you only want to use pysteps, you can continue reading below.
But, if you want to contribute to pysteps or edit the package, you need to install
pysteps in development mode: :ref:`Contributing to pysteps <contributor_guidelines>`.
Install with conda/mamba (recommended)
--------------------------------------
`Conda <https://docs.conda.io/>`_ is an open-source package management system and environment
management system that runs on Windows, macOS, and Linux.
`Mamba <https://mamba.readthedocs.io/>`_ is a drop-in replacement for conda offering
better performances and more reliable environment
solutions. Mamba quickly installs, runs, and updates packages and their dependencies.
It also allows you to easily create, save, load, or switch between different
environments on your local computer.
Since version 1.0, pysteps is available on `conda-forge <https://conda-forge.org/>`_,
a community-driven package repository for conda packages.
To install pysteps with mamba in a new environment, run in a terminal::
mamba create -n pysteps python=3.11
mamba activate pysteps
This will create and activate the new python environment called 'pysteps' using python 3.11.
The next step is to add the conda-forge channel where the pysteps package is located::
conda config --env --prepend channels conda-forge
Let's set this channel as the priority one::
conda config --env --set channel_priority strict
The latter step is not strictly necessary but is recommended since
the conda-forge and the default conda channels are not 100% compatible.
Finally, to install pysteps and all its dependencies run::
mamba install pysteps
Install pysteps on Apple Silicon Macs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
On conda-forge, pysteps is currently compiled for Mac computers with Intel processors (osx-64).
However, thanks to `Rosetta 2 <https://support.apple.com/en-us/HT211861>`_ it is
possible to install the same package on a Mac computer with an Apple Silicon processor
(arm-64).
First, make sure that Rosetta 2 is installed::
softwareupdate --install-rosetta
Use mamba to create a new environment called 'pysteps' for intel packages with python 3.11::
CONDA_SUBDIR=osx-64 mamba create -n pysteps python=3.11
mamba activate pysteps
Make sure that conda/mamba commands in this environment use intel packages::
conda config --env --set subdir osx-64
Verify that the correct platform is being used::
python -c "import platform;print(platform.machine())" # Should print "x86_64"
Finally, run the same pysteps install instructions as given above::
conda config --env --prepend channels conda-forge
conda config --env --set channel_priority strict
mamba install pysteps
We can now verify that pysteps loads correctly::
python -c "import pysteps"
Note that the first time that pysteps is imported will typically take longer, as Rosetta 2
needs to translate the binary code for the Apple Silicon processor.
Install from source
-------------------
The recommended way to install pysteps from the source is using ``pip``
to adhere to the `PEP517 standards <https://www.python.org/dev/peps/pep-0517/>`_.
Using ``pip`` instead of ``setup.py`` guarantees that all the package dependencies
are properly handled during the installation process.
OSX users: gcc compiler
~~~~~~~~~~~~~~~~~~~~~~~
pySTEPS uses Cython extensions that need to be compiled with multi-threading
support enabled. The default Apple Clang compiler does not support OpenMP.
Hence, using the default compiler would have disabled multi-threading and may raise
the following error during the installation::
clang: error: unsupported option '-fopenmp'
error: command 'gcc' failed with exit status 1
To solve this issue, obtain the latest gcc version with
Homebrew_ that has multi-threading enabled::
brew install gcc@13
.. _Homebrew: https://brew.sh/
To make sure that the installer uses the homebrew's gcc, export the
following environmental variables in the terminal
(supposing that gcc version 13 was installed)::
export CC=gcc-13
export CXX=g++-13
First, check that the homebrew's gcc is detected::
which gcc-13
This should point to the homebrew's gcc installation.
Under certain circumstances, Homebrew_ does not add the symbolic links for the
gcc executables under /usr/local/bin.
If that is the case, specify the CC and CXX variables using the full path to
the homebrew installation. For example::
export CC=/usr/local/Cellar/gcc/13.2.0/bin/gcc-13
export CXX=/usr/local/Cellar/gcc/13.2.0/bin/g++-13
Then, you can continue with the normal installation procedure described next.
Installation using pip
~~~~~~~~~~~~~~~~~~~~~~
The latest pysteps version in the repository can be installed using pip by
simply running in a terminal::
pip install git+https://github.com/pySTEPS/pysteps
Or, from a local copy of the repo::
git clone https://github.com/pySTEPS/pysteps
cd pysteps
pip install .
The above commands install the latest version of the **master** branch,
which is continuously under development.
.. warning::
If you are installing pysteps from the sources using pip, the Python interpreter must be launched outside of the pysteps root directory.
Importing pysteps from a working directory that contains the pysteps source code will raise a ``ModuleNotFoundError``.
This error is caused by the root pysteps folder being recognized as the pysteps package, also known as
`the double import trap <http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-double-import-trap>`_.
Setting up the user-defined configuration file
----------------------------------------------
The pysteps package allows the users to customize the default settings
and configuration.
The configuration parameters used by default are loaded from a user-defined
`JSON <https://en.wikipedia.org/wiki/JSON>`_ file and then stored in the **pysteps.rcparams**, a dictionary-like object
that can be accessed as attributes or as items.
.. toctree::
:maxdepth: 1
Set-up the user-defined configuration file <set_pystepsrc>
Example pystepsrc file <pystepsrc_example>
.. _import_pysteps:
Final test: import pysteps in Python
------------------------------------
Activate the pysteps environment::
conda activate pysteps
Launch Python and import pysteps::
python
>>> import pysteps
================================================
FILE: doc/source/user_guide/machine_learning_pysteps.rst
================================================
.. _machine_learning_pysteps:
Benchmarking machine learning models with pysteps
=================================================
How to correctly compare the accuracy of machine learning against traditional nowcasting methods available in pysteps?
Before starting the comparison, you need to ask yourself what is the objective of nowcasting:
#. Do you only want to minimize prediction errors?
#. Do you also want to represent the prediction uncertainty?
To achieve objective 1, it is sufficient to produce a single deterministic nowcast that filters out the unpredictable small-scale precipitation features.
However, this will create a nowcast that will become increasingly smooth over time.
To achieve objective 2, you need to produce a probabilistic or an ensemble nowcast (several ensemble members or realizations).
In weather forecasting (and nowcasting), we usually want to achieve both goals because it is impossible to predict the evolution of a chaotic system with 100% accuracy, especially space-time precipitation fields and thunderstorms!
Machine learning and pysteps offer several methods to produce both deterministic and probabilistic nowcasts.
Therefore, if you want to compare machine learning-based nowcasts to simpler extrapolation-based models, you need to select the right method and verification measure.
1. Deterministic nowcasting
--------------------------------------------
Deterministic nowcasts can be divided into:
a. Variance-preserving nowcasts, such as extrapolation nowcasts by Eulerian and Lagrangian persistence.
b. Error-minimization nowcasts, such as machine learning, Fourier-filtered and ensemble mean nowcasts.
**Very important**: these two types of deterministic nowcasts are not directly comparable because they have a different variance!
This is best explained by the decomposition of the mean squared error (MSE):
:math:`MSE = bias^2 + Var`
All deterministic machine learning algorithms that minimize the MSE (or a related measure) will also inevitably minimize the variance of nowcast fields.
This is a natural attempt to filter out the unpredictable evolution of precipitation features, which would otherwise increase the variance (and the MSE).
The same principle holds for convolutional and/or deep neural network architectures, which also produce smooth nowcasts.
Therefore, it is better to avoid directly comparing an error-minimization machine learning nowcast to a variance-preserving radar extrapolation, as produced by the module :py:mod:`pysteps.nowcasts.extrapolation`. Instead, you should compare with the mean of a sufficiently large ensemble.
A deterministic equivalent of the ensemble mean can be approximated using the modules :py:mod:`pysteps.nowcasts.sprog` or :py:mod:`pysteps.nowcasts.anvil`.
Another possibility, but more computationally demanding, is to average many ensemble members generated by the modules :py:mod:`pysteps.nowcasts.steps` or :py:mod:`pysteps.nowcasts.linda`.
Still, even by using the pysteps ensemble mean, it is not given that its variance will be the same as the one of machine learning predictions.
Possible solutions to this:
#. use a normalized MSE (NMSE) or another score accounting for differences in the variance between prediction and observation.
#. decompose the field with a Fourier (or wavelet) transform to compare features at the same spatial scales.
A good deterministic comparison of a deep convolutional machine learning neural network nowcast and pysteps is given in :cite:`FNPC2020`.
2. Probabilistic nowcasting
--------------------------------------------
Probabilistic machine learning regression methods can be roughly categorized into:
a. Quantile-based methods, such as quantile regression, quantile random forests, and quantile neural networks.
b. Ensemble-based methods, such as generative adversarial networks (GANs) and variational auto-encoders (VAEs).
Quantile-based machine learning nowcasts are interesting, but can only estimate the probability of exceedance at a given point (see e.g. :cite:`FSNBG2019`).
To estimate areal exceedance probabilities, for example above catchments, or to propagate the nowcast uncertainty into hydrological models, the full ensemble still needs to be generated, e.g. with generative machine learning models.
Generative machine learning methods are similar to the pysteps ensemble members. Both are designed to produce an ensemble of possible realizations that preserve the variance of observed radar fields.
A proper probabilistic verification of generative machine learning models against pysteps is an interesting research direction which was recently undertaken in the work of :cite:`Ravuri2021`.
Summary
-------
The table below is an attempt to classify machine learning and pysteps nowcasting methods according to the four main prediction types:
#. Deterministic (variance-preserving), like one control NWP forecast
#. Deterministic (error-minimization), like an ensemble mean NWP forecast
#. Probabilistic (quantile-based), like a probabilistic NWP forecast (without members)
#. Probabilistic (ensemble-based), like the members of an ensemble NWP forecast
The comparison of methods from different types should only be done carefully and with good reasons.
.. list-table::
:widths: 30 20 20 20
:header-rows: 1
* - Nowcast type
- Machine learning
- pysteps
- Verification
* - Deterministic (variance-preserving)
- SRGAN, Others?
- :py:mod:`pysteps.nowcasts.extrapolation` (any optical flow method)
- MSE, RMSE, MAE, ETS, etc
* - Deterministic (error-minimization)
- Classical ANNs, (deep) CNNs, random forests, AdaBoost, etc
- :py:mod:`pysteps.nowcasts.sprog`, :py:mod:`pysteps.nowcasts.anvil` or ensemble mean of :py:mod:`pysteps.nowcasts.steps`/:py:mod:`~pysteps.nowcasts.linda`
- MSE, RMSE, MAE, ETS, etc or better normalized scores, etc
* - Probabilistic (quantile-based)
- Quantile ANN, quantile random forests, quantile regression
- :py:mod:`pysteps.nowcasts.lagrangian_probability` or probabilities derived from :py:mod:`pysteps.nowcasts.steps`/:py:mod:`~pysteps.nowcasts.linda`
- Reliability diagram (predicted vs observed quantile), probability integral transform (PIT) histogram
* - Probabilistic (ensemble-based)
- GANs (:cite:`Ravuri2021`), VAEs, etc
- Ensemble and probabilities derived from :py:mod:`pysteps.nowcasts.steps`/:py:mod:`~pysteps.nowcasts.linda`
- Probabilistic verification: reliability diagrams, continuous ranked probability scores (CRPS), etc.
Ensemble verification: rank histograms, spread-error relationships, etc
================================================
FILE: doc/source/user_guide/pystepsrc_example.rst
================================================
.. _pystepsrc_example:
Example of pystepsrc file
=========================
Below you can find the default pystepsrc file.
The lines starting with "//" are comments and they are ignored.
.. code::
// pysteps configuration
{
// "silent_import" : whether to suppress the initial pysteps message
"silent_import": false,
"outputs": {
// path_outputs : path where to save results (figures, forecasts, etc)
"path_outputs": "./"
},
"plot": {
// "motion_plot" : "streamplot" or "quiver"
"motion_plot": "quiver",
// "colorscale" : "BOM-RF3", "pysteps" or "STEPS-BE"
"colorscale": "pysteps"
},
"data_sources": {
"bom": {
"root_path": "./radar/bom",
"path_fmt": "prcp-cscn/2/%Y/%m/%d",
"fn_pattern": "2_%Y%m%d_%H%M00.prcp-cscn",
"fn_ext": "nc",
"importer": "bom_rf3",
"timestep": 6,
"importer_kwargs": {
"gzipped": true
}
},
"fmi": {
"root_path": "./radar/fmi",
"path_fmt": "%Y%m%d",
"fn_pattern": "%Y%m%d%H%M_fmi.radar.composite.lowest_FIN_SUOMI1",
"fn_ext": "pgm.gz",
"importer": "fmi_pgm",
"timestep": 5,
"importer_kwargs": {
"gzipped": true
}
},
"mch": {
"root_path": "./radar/mch",
"path_fmt": "%Y%m%d",
"fn_pattern": "AQC%y%j%H%M?_00005.801",
"fn_ext": "gif",
"importer": "mch_gif",
"timestep": 5,
"importer_kwargs": {
"product": "AQC",
"unit": "mm",
"accutime": 5
}
},
"opera": {
"root_path": "./radar/OPERA",
"path_fmt": "%Y%m%d",
"fn_pattern": "T_PAAH21_C_EUOC_%Y%m%d%H%M%S",
"fn_ext": "hdf",
"importer": "opera_hdf5",
"timestep": 15,
"importer_kwargs": {}
},
"knmi": {
"root_path": "./radar/KNMI",
"path_fmt": "%Y/%m",
"fn_pattern": "RAD_NL25_RAP_5min_%Y%m%d%H%M",
"fn_ext": "h5",
"importer": "knmi_hdf5",
"timestep": 5,
"importer_kwargs": {
"accutime": 5,
"qty": "ACRR",
"pixelsize": 1000.0
}
},
"saf": {
"root_path": "./saf",
"path_fmt": "%Y%m%d/CRR",
"fn_pattern": "S_NWC_CRR_MSG4_Europe-VISIR_%Y%m%dT%H%M00Z",
"fn_ext": "nc",
"importer": "saf_crri",
"timestep": 15,
"importer_kwargs": {
"gzipped": true
}
}
}
}
================================================
FILE: doc/source/user_guide/set_pystepsrc.rst
================================================
.. _pystepsrc:
The pysteps configuration file (pystepsrc)
==========================================
.. _JSON: https://en.wikipedia.org/wiki/JSON
The pysteps package allows the users to customize the default settings
and configuration.
The configuration parameters used by default are loaded from a user-defined
JSON_ file and then stored in `pysteps.rcparams`, a dictionary-like object
that can be accessed as attributes or as items.
For example, the default parameters can be obtained using any of the following ways::
import pysteps
# Retrieve the colorscale for plots
colorscale = pysteps.rcparams['plot']['colorscale']
colorscale = pysteps.rcparams.plot.colorscale
# Retrieve the the root directory of the fmi data
pysteps.rcparams['data_sources']['fmi']['root_path']
pysteps.rcparams.data_sources.fmi.root_path
A less wordy alternative::
from pysteps import rcparams
colorscale = rcparams['plot']['colorscale']
colorscale = rcparams.plot.colorscale
fmi_root_path = rcparams['data_sources']['fmi']['root_path']
fmi_root_path = rcparams.data_sources.fmi.root_path
.. _pysteps_lookup:
Configuration file lookup
~~~~~~~~~~~~~~~~~~~~~~~~~
When the pysteps package is imported, it looks for the **pystepsrc** file in the
following order:
- **$PWD/pystepsrc** : Looks for the file in the current directory
- **$PYSTEPSRC** : If the system variable $PYSTEPSRC is defined and it
points to a file, it is used.
- **$PYSTEPSRC/pystepsrc** : If $PYSTEPSRC points to a directory, it looks for the
pystepsrc file inside that directory.
- **$HOME/.pysteps/pystepsrc** (Unix and Mac OS X) : If the system variable $HOME is defined, it looks
for the configuration file in this path.
- **%USERPROFILE%\\pysteps\\pystepsrc** (Windows only): It looks for the configuration file
in the pysteps directory located in the user's home directory (indicated by the %USERPROFILE%
system variable).
- Lastly, it looks inside the library in *pysteps\\pystepsrc* for a
system-defined copy.
The recommended method to set up the configuration file is to edit a copy
of the default **pystepsrc** file that is distributed with the package
and place that copy inside the user home folder.
See the instructions below.
Setting up the user-defined configuration file
----------------------------------------------
Linux and OSX users
~~~~~~~~~~~~~~~~~~~
For Linux and OSX users, the recommended way to customize the pysteps
configuration is placing the pystepsrc parameters file in the users home folder
${HOME} in the following path: **${HOME}/.pysteps/pystepsrc**
To set up the configuration file in the home directory, we first
need to create the directory if it does not exist. In a terminal, run::
$ mkdir -p ${HOME}/.pysteps
The next step is to find the location of the library's default pystepsrc file.
When we import pysteps in a python interpreter, the configuration file loaded
is shown::
import pysteps
"Pysteps configuration file found at: /path/to/pysteps/library/pystepsrc"
Then we copy the library's default configuration file to that directory::
$ cp /path/to/pysteps/library/pystepsrc ${HOME}/.pysteps/pystepsrc
Edit the file with the text editor of your preference and change the default
configurations with your preferences.
Finally, check that the correct configuration file is loaded by the library::
import pysteps
"Pysteps configuration file found at: /home/user_name/.pysteps/pystepsrc"
Windows
~~~~~~~
For windows users, the recommended way to customize the pysteps
configuration is placing the pystepsrc parameters file in the users' folder
(defined in the %USERPROFILE% environment variable) in the following path:
**%USERPROFILE%\\pysteps\\pystepsrc**
To set up the configuration file in the home directory, we first
need to create the directory if it does not exist. In a **windows terminal**, run::
$ mkdir %USERPROFILE%\pysteps
**Important**
It was reported that the %USERPROFILE% variable may be interpreted as a string
literal when the anaconda terminal is used.
This will result in a '%USERPROFILE%' folder being created in the current working directory
instead of the desired pysteps folder in the user's home.
If that is the case, use the explicit path to your home folder instead of `%USERPROFILE%`.
For example::
$ mkdir C:\Users\your_username\pysteps
The next step is to find the location of the library's default pystepsrc file.
When we import pysteps in a python interpreter, the configuration file loaded
is shown::
import pysteps
"Pysteps configuration file found at: C:\path\to\pysteps\library\pystepsrc"
Then we copy the library's default configuration file to that directory::
$ copy C:\path\to\pysteps\library\pystepsrc %USERPROFILE%\pysteps\pystepsrc
Edit the file with the text editor of your preference and change the default
configurations with your preferences.
Finally, check that the correct configuration file is loaded by the library::
import pysteps
"Pysteps configuration file found at: C:\User\Profile\.pysteps\pystepsrc"
More
----
.. toctree::
:maxdepth: 1
Example pystepsrc file <pystepsrc_example>
================================================
FILE: doc/source/zz_bibliography.rst
================================================
.. _bibliography:
============
Bibliography
============
.. bibliography::
:all:
================================================
FILE: environment.yml
================================================
name: pysteps
channels:
- conda-forge
- defaults
dependencies:
- python>=3.10
- jsmin
- jsonschema
- matplotlib
- netCDF4
- numpy
- opencv
- pillow
- pyproj
- scipy
================================================
FILE: environment_dev.yml
================================================
# pysteps development environment
name: pysteps_dev
channels:
- conda-forge
- defaults
dependencies:
- python>=3.10
- pip
- jsmin
- jsonschema
- matplotlib
- netCDF4
- numpy
- opencv
- pillow
- pyproj
- scipy
- pytest
- pywavelets
- cython
- dask
- pyfftw
- h5py
- pygrib
- black
- pytest-cov
- codecov
- pre_commit
- cartopy>=0.18
- scikit-image
- scikit-learn
- pandas
- rasterio
================================================
FILE: examples/LK_buffer_mask.py
================================================
# -*- coding: utf-8 -*-
"""
Handling of no-data in Lucas-Kanade
===================================
Areas of missing data in radar images are typically caused by visibility limits
such as beam blockage and the radar coverage itself. These artifacts can mislead
the echo tracking algorithms. For instance, precipitation leaving the domain
might be erroneously detected as having nearly stationary velocity.
This example shows how the Lucas-Kanade algorithm can be tuned to avoid the
erroneous interpretation of velocities near the maximum range of the radars by
buffering the no-data mask in the radar image in order to exclude all vectors
detected nearby no-data areas.
"""
from datetime import datetime
from matplotlib import cm, colors
import matplotlib.pyplot as plt
import numpy as np
from pysteps import io, motion, nowcasts, rcparams, verification
from pysteps.utils import conversion, transformation
from pysteps.visualization import plot_precip_field, quiver
################################################################################
# Read the radar input images
# ---------------------------
#
# First, we will import the sequence of radar composites.
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Selected case
date = datetime.strptime("201607112100", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]
# Find the two input files from the archive
fns = io.archive.find_by_date(
    date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=1
)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
R, quality, metadata = io.read_timeseries(fns, importer, **importer_kwargs)
del quality # Not used
###############################################################################
# Preprocess the data
# ~~~~~~~~~~~~~~~~~~~
# Convert to mm/h
R, metadata = conversion.to_rainrate(R, metadata)
# Keep the reference frame in mm/h and its mask (for plotting purposes)
ref_mm = R[0, :, :].copy()
# mask is 1 where the reference frame is missing (NaN) and NaN elsewhere,
# so it can be overlaid later as a semi-transparent "no data" layer
mask = np.ones(ref_mm.shape)
mask[~np.isnan(ref_mm)] = np.nan
# Log-transform the data [dBR]
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
# Keep the reference frame in dBR (for plotting purposes)
ref_dbr = R[0].copy()
ref_dbr[ref_dbr < -10] = np.nan
# Plot the reference field
plot_precip_field(ref_mm, title="Reference field")
circle = plt.Circle((620, 400), 100, color="b", clip_on=False, fill=False)
plt.gca().add_artist(circle)
plt.show()
###############################################################################
# Notice the "half-in, half-out" precipitation area within the blue circle.
# As we are going to show next, the tracking algorithm can erroneously interpret
# precipitation leaving the domain as stationary motion.
#
# Also note that the radar image includes NaNs in areas of missing data.
# These are used by the optical flow algorithm to define the radar mask.
#
# Sparse Lucas-Kanade
# -------------------
#
# By setting the optional argument ``dense=False`` in ``xy, uv = dense_lucaskanade(...)``,
# the LK algorithm returns the motion vectors detected by the Lucas-Kanade scheme
# without interpolating them on the grid.
# This allows us to better identify the presence of wrongly detected
# stationary motion in areas where precipitation is leaving the domain (look
# for the red dots within the blue circle in the figure below).
# Get Lucas-Kanade optical flow method
dense_lucaskanade = motion.get_method("LK")
# Mask invalid values
R = np.ma.masked_invalid(R)
# Use no buffering of the radar mask
fd_kwargs1 = {"buffer_mask": 0}
# dense=False returns the raw (sparse) vectors instead of an interpolated grid
xy, uv = dense_lucaskanade(R, dense=False, fd_kwargs=fd_kwargs1)
plt.imshow(ref_dbr, cmap=plt.get_cmap("Greys"))
plt.imshow(mask, cmap=colors.ListedColormap(["black"]), alpha=0.5)
plt.quiver(
    xy[:, 0],
    xy[:, 1],
    uv[:, 0],
    uv[:, 1],
    color="red",
    angles="xy",
    scale_units="xy",
    scale=0.2,
)
circle = plt.Circle((620, 245), 100, color="b", clip_on=False, fill=False)
plt.gca().add_artist(circle)
plt.title("buffer_mask = 0")
plt.show()
################################################################################
# The LK algorithm cannot distinguish missing values from no precipitation, that is,
# no-data are the same as no-echoes. As a result, the fixed boundaries produced
# by precipitation in contact with no-data areas are interpreted as stationary motion.
# One way to mitigate this effect of the boundaries is to introduce a slight buffer
# of the no-data mask so that the algorithm will ignore all the portions of the
# radar domain that are nearby no-data areas.
# This buffer can be set by the keyword argument ``buffer_mask`` within the
# feature detection optional arguments ``fd_kwargs``.
# Note that by default ``dense_lucaskanade`` uses a 5-pixel buffer.
# with buffer
buffer = 10  # buffer size [pixels]
fd_kwargs2 = {"buffer_mask": buffer}
xy, uv = dense_lucaskanade(R, dense=False, fd_kwargs=fd_kwargs2)
plt.imshow(ref_dbr, cmap=plt.get_cmap("Greys"))
plt.imshow(mask, cmap=colors.ListedColormap(["black"]), alpha=0.5)
plt.quiver(
    xy[:, 0],
    xy[:, 1],
    uv[:, 0],
    uv[:, 1],
    color="red",
    angles="xy",
    scale_units="xy",
    scale=0.2,
)
circle = plt.Circle((620, 245), 100, color="b", clip_on=False, fill=False)
plt.gca().add_artist(circle)
plt.title("buffer_mask = %i" % buffer)
plt.show()
################################################################################
# Dense Lucas-Kanade
# ------------------
#
# The above displacement vectors produced by the Lucas-Kanade method are now
# interpolated to produce a full field of motion (i.e., ``dense=True``).
# By comparing the velocity of the motion fields, we can easily notice
# the negative bias that is introduced by the erroneous interpretation of
# velocities near the maximum range of the radars.
UV1 = dense_lucaskanade(R, dense=True, fd_kwargs=fd_kwargs1)
UV2 = dense_lucaskanade(R, dense=True, fd_kwargs=fd_kwargs2)
# Motion speed (magnitude of the U and V components)
V1 = np.sqrt(UV1[0] ** 2 + UV1[1] ** 2)
V2 = np.sqrt(UV2[0] ** 2 + UV2[1] ** 2)
plt.imshow((V1 - V2) / V2, cmap=cm.RdBu_r, vmin=-0.5, vmax=0.5)
plt.colorbar(fraction=0.04, pad=0.04)
plt.title("Relative difference in motion speed")
plt.show()
################################################################################
# Notice how the presence of erroneous velocity vectors produces a significantly
# slower motion field near the right edge of the domain.
#
# Forecast skill
# --------------
#
# We are now going to evaluate the benefit of buffering the radar mask by computing
# the forecast skill in terms of the Spearman correlation coefficient.
# The extrapolation forecasts are computed using the dense UV motion fields
# estimated above.
# Get the advection routine and extrapolate the last radar frame by 12 time steps
# (i.e., 1 hour lead time)
extrapolate = nowcasts.get_method("extrapolation")
# Replace non-finite values so the extrapolation does not propagate NaNs
R[~np.isfinite(R)] = metadata["zerovalue"]
R_f1 = extrapolate(R[-1], UV1, 12)
R_f2 = extrapolate(R[-1], UV2, 12)
# Back-transform to rain rate
R_f1 = transformation.dB_transform(R_f1, threshold=-10.0, inverse=True)[0]
R_f2 = transformation.dB_transform(R_f2, threshold=-10.0, inverse=True)[0]
# Find the verifying observations in the archive
fns = io.archive.find_by_date(
    date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_next_files=12
)
# Read and convert the radar composites
R_o, _, metadata_o = io.read_timeseries(fns, importer, **importer_kwargs)
R_o, metadata_o = conversion.to_rainrate(R_o, metadata_o)
# Compute Spearman correlation
skill = verification.get_method("corr_s")
score_1 = []
score_2 = []
# R_o[0] is the analysis time, so the forecast at lead step i verifies
# against observation i + 1
for i in range(12):
    score_1.append(skill(R_f1[i, :, :], R_o[i + 1, :, :])["corr_s"])
    score_2.append(skill(R_f2[i, :, :], R_o[i + 1, :, :])["corr_s"])
x = (np.arange(12) + 1) * 5  # lead times [min]
plt.plot(x, score_1, label="buffer_mask = 0")
plt.plot(x, score_2, label="buffer_mask = %i" % buffer)
plt.legend()
plt.xlabel("Lead time [min]")
plt.ylabel("Corr. coeff. []")
plt.title("Spearman correlation")
plt.tight_layout()
plt.show()
################################################################################
# As expected, the corrected motion field produces better forecast skill already
# within the first hour into the nowcast.
# sphinx_gallery_thumbnail_number = 2
================================================
FILE: examples/README.txt
================================================
.. _example_gallery:
Example gallery
===============
Below is a collection of example scripts and tutorials to illustrate the usage
of pysteps.
These scripts require the pysteps example data.
See the installation instructions in the :ref:`example_data` section.
================================================
FILE: examples/advection_correction.py
================================================
"""
Advection correction
====================
This tutorial shows how to use the optical flow routines of pysteps to implement
the advection correction procedure described in Anagnostou and Krajewski (1999).
Advection correction is a temporal interpolation procedure that is often used
when estimating rainfall accumulations to correct for the shift of rainfall patterns
between consecutive radar rainfall maps. This shift becomes particularly
significant for long radar scanning cycles and in presence of fast moving
precipitation features.
.. note:: The code for the advection correction using pysteps was originally
written by `Daniel Wolfensberger <https://github.com/wolfidan>`_.
"""
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from pysteps import io, motion, rcparams
from pysteps.utils import conversion, dimension
from pysteps.visualization import plot_precip_field
from scipy.ndimage import map_coordinates
################################################################################
# Read the radar input images
# ---------------------------
#
# First, we import a sequence of 36 images of 5-minute radar composites
# that we will use to produce a 3-hour rainfall accumulation map.
# We will keep only one frame every 10 minutes, to simulate a longer scanning
# cycle and thus better highlight the need for advection correction.
#
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Selected case
date = datetime.strptime("201607112100", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]
# Find the input files from the archive (3 hours of 5-minute composites)
fns = io.archive.find_by_date(
    date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_next_files=35
)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
R, __, metadata = io.read_timeseries(fns, importer, **importer_kwargs)
# Convert to mm/h
R, metadata = conversion.to_rainrate(R, metadata)
# Upscale to 2 km (simply to reduce the memory demand)
R, metadata = dimension.aggregate_fields_space(R, metadata, 2000)
# Keep only one frame every 10 minutes (i.e., every 2 timesteps)
# (to highlight the need for advection correction)
R = R[::2]
################################################################################
# Advection correction
# --------------------
#
# Now we need to implement the advection correction for a pair of successive
# radar images. The procedure is based on the algorithm described in Anagnostou
# and Krajewski (Appendix A, 1999).
#
# To evaluate the advection occurred between two successive radar images, we are
# going to use the Lucas-Kanade optical flow routine available in pysteps.
def advection_correction(R, T=5, t=1):
    """
    Advection-corrected temporal interpolation between two rain fields.

    R = np.array([qpe_previous, qpe_current])
    T = time between two observations (5 min)
    t = interpolation timestep (1 min)
    """
    # Estimate the motion field from the log-transformed pair of fields;
    # the mask buffer avoids spurious vectors at the domain edges
    optical_flow = motion.get_method("LK")
    motion_field = optical_flow(np.log(R), fd_kwargs={"buffer_mask": 10})

    # Pixel coordinate grids used to displace the two fields
    grid_x, grid_y = np.meshgrid(
        np.arange(R[0].shape[1], dtype=float), np.arange(R[0].shape[0], dtype=float)
    )

    # Time-weighted sum of the backward-advected first field and the
    # forward-advected second field, evaluated at each interpolation step
    accumulated = np.zeros(R[0].shape)
    for i in range(t, T + t, t):
        backward = map_coordinates(
            R[0],
            (grid_y - i / T * motion_field[1], grid_x - i / T * motion_field[0]),
            order=1,
        )
        forward = map_coordinates(
            R[1],
            (
                grid_y + (T - i) / T * motion_field[1],
                grid_x + (T - i) / T * motion_field[0],
            ),
            order=1,
        )
        accumulated += (T - i) * backward + i * forward
    # Normalization turns the weighted sum into a temporal mean
    return t / T**2 * accumulated
###############################################################################
# Finally, we apply the advection correction to the whole sequence of radar
# images and produce the rainfall accumulation map.
# Accumulate the advection-corrected 10-minute pairs over the full sequence
R_ac = R[0].copy()
for i in range(R.shape[0] - 1):
    R_ac += advection_correction(R[i : (i + 2)], T=10, t=1)
# Normalize the sum into a mean rain rate over the period
R_ac /= R.shape[0]
###############################################################################
# Results
# -------
#
# We compare the two accumulation maps. The first map on the left is
# computed without advection correction and we can therefore see that the shift
# between successive images 10 minutes apart produces irregular accumulations.
# Conversely, the rainfall accumulation of the right is produced using advection
# correction to account for this spatial shift. The final result is a smoother
# rainfall accumulation map.
# Side-by-side comparison: plain temporal mean vs. advection-corrected result
plt.figure(figsize=(9, 4))
plt.subplot(121)
plot_precip_field(R.mean(axis=0), title="3-h rainfall accumulation")
plt.subplot(122)
plot_precip_field(R_ac, title="Same with advection correction")
plt.tight_layout()
plt.show()
################################################################################
# Reference
# ~~~~~~~~~
#
# Anagnostou, E. N., and W. F. Krajewski. 1999. "Real-Time Radar Rainfall
# Estimation. Part I: Algorithm Formulation." Journal of Atmospheric and
# Oceanic Technology 16: 189–97.
# https://doi.org/10.1175/1520-0426(1999)016<0189:RTRREP>2.0.CO;2
================================================
FILE: examples/anvil_nowcast.py
================================================
# coding: utf-8
"""
ANVIL nowcast
=============
This example demonstrates how to use ANVIL and the advantages compared to
extrapolation nowcast and S-PROG.
Load the libraries.
"""
from datetime import datetime, timedelta
import warnings
warnings.simplefilter("ignore")
import matplotlib.pyplot as plt
import numpy as np
from pysteps import motion, io, rcparams, utils
from pysteps.nowcasts import anvil, extrapolation, sprog
from pysteps.utils import transformation
from pysteps.visualization import plot_precip_field
###############################################################################
# Read the input data
# -------------------
#
# ANVIL was originally developed to use vertically integrated liquid (VIL) as
# the input data, but the model allows using any two-dimensional input fields.
# Here we use a composite of rain rates.
date = datetime.strptime("201505151620", "%Y%m%d%H%M")
# Read the data source information from rcparams
data_source = rcparams.data_sources["mch"]
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
# Find the input files in the archive. Use history length of 5 timesteps
filenames = io.archive.find_by_date(
    date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=5
)
# Read the input time series
importer = io.get_method(importer_name, "importer")
rainrate_field, quality, metadata = io.read_timeseries(
    filenames, importer, **importer_kwargs
)
# Convert to rain rate (mm/h)
rainrate_field, metadata = utils.to_rainrate(rainrate_field, metadata)
################################################################################
# Compute the advection field
# ---------------------------
#
# Apply the Lucas-Kanade method with the parameters given in Pulkkinen et al.
# (2020) to compute the advection field.
# Feature-detection parameters (corner detection) for Lucas-Kanade
fd_kwargs = {
    "max_corners": 1000,
    "quality_level": 0.01,
    "min_distance": 2,
    "block_size": 8,
}
# Tracking window for the Lucas-Kanade step
lk_kwargs = {"winsize": (15, 15)}
# Assemble all optical-flow options in one place
oflow_kwargs = {
    "fd_kwargs": fd_kwargs,
    "lk_kwargs": lk_kwargs,
    "decl_scale": 10,
}
oflow = motion.get_method("lucaskanade")
# transform the input data to logarithmic scale
rainrate_field_log, _ = utils.transformation.dB_transform(
    rainrate_field, metadata=metadata
)
velocity = oflow(rainrate_field_log, **oflow_kwargs)
###############################################################################
# Compute the nowcasts and threshold rain rates below 0.5 mm/h
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Plain advection nowcast; allow_nonfinite_values keeps the NaN radar mask
forecast_extrap = extrapolation.forecast(
    rainrate_field[-1], velocity, 3, extrap_kwargs={"allow_nonfinite_values": True}
)
forecast_extrap[forecast_extrap < 0.5] = 0.0
# log-transform the data and the threshold value to dBR units for S-PROG
rainrate_field_db, _ = transformation.dB_transform(
    rainrate_field, metadata, threshold=0.1, zerovalue=-15.0
)
rainrate_thr, _ = transformation.dB_transform(
    np.array([0.5]), metadata, threshold=0.1, zerovalue=-15.0
)
forecast_sprog = sprog.forecast(
    rainrate_field_db[-3:], velocity, 3, n_cascade_levels=6, precip_thr=rainrate_thr[0]
)
# Back-transform the S-PROG output from dBR to rain rate
forecast_sprog, _ = transformation.dB_transform(
    forecast_sprog, threshold=-10.0, inverse=True
)
forecast_sprog[forecast_sprog < 0.5] = 0.0
# ANVIL runs directly on the rain-rate fields (no dB transform applied here)
forecast_anvil = anvil.forecast(
    rainrate_field[-4:], velocity, 3, ar_window_radius=25, ar_order=2
)
forecast_anvil[forecast_anvil < 0.5] = 0.0
###############################################################################
# Read the reference observation field and threshold rain rates below 0.5 mm/h
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
filenames = io.archive.find_by_date(
    date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_next_files=3
)
refobs_field, _, metadata = io.read_timeseries(filenames, importer, **importer_kwargs)
# Keep only the last frame (+15 min) as the verifying observation
refobs_field, metadata = utils.to_rainrate(refobs_field[-1], metadata)
refobs_field[refobs_field < 0.5] = 0.0
###############################################################################
# Plot the extrapolation, S-PROG and ANVIL nowcasts.
# --------------------------------------------------
#
# For comparison, the observed rain rate fields are also plotted. Growth and
# decay areas are marked with red and blue circles, respectively.
def plot_growth_decay_circles(ax):
    """Mark the discussed growth and decay areas on the given axes.

    Parameters
    ----------
    ax : matplotlib axes
        The axes on which the circles are drawn.

    Blue circles mark decay areas, red circles mark growth areas.
    """
    # (center (x, y), radius, color) for each highlighted area;
    # same circles, in the same order, as the original repeated calls
    circle_specs = [
        ((360, 300), 25, "b"),
        ((420, 350), 30, "b"),
        ((405, 380), 30, "b"),
        ((420, 500), 25, "b"),
        ((480, 535), 30, "b"),
        ((330, 470), 35, "b"),
        ((505, 205), 30, "b"),
        ((440, 180), 30, "r"),
        ((590, 240), 30, "r"),
        ((585, 160), 15, "r"),
    ]
    for center, radius, color in circle_specs:
        # zorder=1e9 keeps the circles on top of the precipitation field
        circle = plt.Circle(
            center, radius, color=color, clip_on=False, fill=False, zorder=1e9
        )
        ax.add_artist(circle)
fig = plt.figure(figsize=(10, 13))
ax = fig.add_subplot(321)
# Apply the same 0.5 mm/h threshold to the analysis frame for a fair comparison
rainrate_field[-1][rainrate_field[-1] < 0.5] = 0.0
plot_precip_field(rainrate_field[-1])
plot_growth_decay_circles(ax)
ax.set_title("Obs. %s" % str(date))
ax = fig.add_subplot(322)
plot_precip_field(refobs_field)
plot_growth_decay_circles(ax)
ax.set_title("Obs. %s" % str(date + timedelta(minutes=15)))
ax = fig.add_subplot(323)
plot_precip_field(forecast_extrap[-1])
plot_growth_decay_circles(ax)
ax.set_title("Extrapolation +15 minutes")
ax = fig.add_subplot(324)
plot_precip_field(forecast_sprog[-1])
plot_growth_decay_circles(ax)
ax.set_title("S-PROG (with post-processing)\n +15 minutes")
ax = fig.add_subplot(325)
plot_precip_field(forecast_anvil[-1])
plot_growth_decay_circles(ax)
ax.set_title("ANVIL +15 minutes")
plt.show()
###############################################################################
# Remarks
# -------
#
# The extrapolation nowcast is static, i.e. it does not predict any growth or
# decay. While S-PROG is to some extent able to predict growth and decay,
# this comes with loss of small-scale features. In addition, statistical
# post-processing needs to be applied to correct the bias and incorrect wet-area
# ratio introduced by the autoregressive process. ANVIL is able to do both:
# predict growth and decay and preserve the small-scale structure in a way that
# post-processing is not necessary.
================================================
FILE: examples/data_transformations.py
================================================
# -*- coding: utf-8 -*-
"""
Data transformations
====================
The statistics of intermittent precipitation rates are particularly non-Gaussian
and display an asymmetric distribution bounded at zero.
Such properties restrict the usage of well-established statistical methods that
assume symmetric or Gaussian data.
A common workaround is to introduce a suitable data transformation to approximate
a normal distribution.
In this example, we test the data transformation methods available in pysteps
in order to obtain a more symmetric distribution of the precipitation data
(excluding the zeros).
The currently available transformations include the Box-Cox, dB, square-root and
normal quantile transforms.
"""
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from pysteps import io, rcparams
from pysteps.utils import conversion, transformation
from scipy.stats import skew
###############################################################################
# Read the radar input images
# ---------------------------
#
# First, we will import the sequence of radar composites.
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Selected case
date = datetime.strptime("201609281600", "%Y%m%d%H%M")
data_source = rcparams.data_sources["fmi"]
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]
# Get 1 hour of observations in the data archive
fns = io.archive.find_by_date(
    date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_next_files=11
)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
Z, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs)
# Keep only positive rainfall values (flattened to a 1-D sample)
Z = Z[Z > metadata["zerovalue"]].flatten()
# Convert to rain rate
R, metadata = conversion.to_rainrate(Z, metadata)
###############################################################################
# Test data transformations
# -------------------------
# Define method to visualize the data distribution with boxplots and plot the
# corresponding skewness
def plot_distribution(data, labels, skw):
    """Draw boxplots of the samples in *data* with their skewness overlaid.

    data : list of arrays, one sample per box
    labels : tick label for each sample
    skw : skewness value of each sample, drawn as red stars on a twin axis
    """
    n_samples = len(data)
    fig, ax_box = plt.subplots()
    ax_skw = ax_box.twinx()  # secondary y-axis for the skewness markers

    # Zero-skewness reference line (dotted red)
    ax_skw.plot(np.arange(n_samples + 2), np.zeros(n_samples + 2), ":r")
    ax_box.boxplot(data, labels=labels, sym="", medianprops={"color": "k"})

    sample_maxima = []
    for position, (sample, skewness) in enumerate(zip(data, skw), start=1):
        ax_skw.plot(position, skewness, "*r", ms=10, markeredgecolor="k")
        sample_maxima.append(np.max(sample))

    # Symmetric y-limits: median of the sample maxima for the boxplot axis,
    # padded absolute maximum for the skewness axis
    box_limit = np.percentile(sample_maxima, 50)
    ax_box.set_ylim((-1 * box_limit, box_limit))
    skw_limit = np.max(np.abs(skw))
    ax_skw.set_ylim((-1.1 * skw_limit, 1.1 * skw_limit))

    # Axis labels
    ax_box.set_ylabel(r"Standardized values [$\sigma$]")
    ax_skw.set_ylabel(r"Skewness []", color="r")
    ax_skw.tick_params(axis="y", labelcolor="r")
###############################################################################
# Box-Cox transform
# ~~~~~~~~~~~~~~~~~
# The Box-Cox transform is a well-known power transformation introduced by
# `Box and Cox (1964)`_. In its one-parameter version, the Box-Cox transform
# takes the form T(x) = ln(x) for lambda = 0, or T(x) = (x**lambda - 1)/lambda
# otherwise.
#
# To find a suitable lambda, we will experiment with a range of values
# and select the one that produces the most symmetric distribution, i.e., the
# lambda associated with a value of skewness closest to zero.
# To visually compare the results, the transformed data are standardized.
#
# .. _`Box and Cox (1964)`: https://doi.org/10.1111/j.2517-6161.1964.tb00553.x
data = []
labels = []
skw = []
# Test a range of values for the transformation parameter Lambda
Lambdas = np.linspace(-0.4, 0.4, 11)
for i, Lambda in enumerate(Lambdas):
    R_, _ = transformation.boxcox_transform(R, metadata, Lambda)
    # Standardize so the distributions are visually comparable
    R_ = (R_ - np.mean(R_)) / np.std(R_)
    data.append(R_)
    labels.append("{0:.2f}".format(Lambda))
    skw.append(skew(R_))  # skewness
# Plot the transformed data distribution as a function of lambda
plot_distribution(data, labels, skw)
plt.title("Box-Cox transform")
plt.tight_layout()
plt.show()
# Best lambda: the one whose transformed distribution is closest to symmetric
idx_best = np.argmin(np.abs(skw))
Lambda = Lambdas[idx_best]
print("Best parameter lambda: %.2f\n(skewness = %.2f)" % (Lambda, skw[idx_best]))
###############################################################################
# Compare data transformations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collect standardized samples, tick labels, and skewness for each transform
data = []
labels = []
skw = []
###############################################################################
# Rain rates
# ~~~~~~~~~~
# First, let's have a look at the original rain rate values.
data.append((R - np.mean(R)) / np.std(R))
labels.append("R")
skw.append(skew(R))
###############################################################################
# dB transform
# ~~~~~~~~~~~~
# We transform the rainfall data into dB units: 10*log(R)
R_, _ = transformation.dB_transform(R, metadata)
data.append((R_ - np.mean(R_)) / np.std(R_))
labels.append("dB")
skw.append(skew(R_))
###############################################################################
# Square-root transform
# ~~~~~~~~~~~~~~~~~~~~~
# Transform the data using the square-root: sqrt(R)
R_, _ = transformation.sqrt_transform(R, metadata)
data.append((R_ - np.mean(R_)) / np.std(R_))
labels.append("sqrt")
skw.append(skew(R_))
###############################################################################
# Box-Cox transform
# ~~~~~~~~~~~~~~~~~
# We now apply the Box-Cox transform using the best parameter lambda found above.
R_, _ = transformation.boxcox_transform(R, metadata, Lambda)
data.append((R_ - np.mean(R_)) / np.std(R_))
# The backslash is escaped: "\l" is an invalid escape sequence (currently a
# DeprecationWarning, a syntax error in future Python versions); the rendered
# label text is unchanged.
labels.append("Box-Cox\n($\\lambda=$%.2f)" % Lambda)
skw.append(skew(R_))
###############################################################################
# Normal quantile transform
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# At last, we apply the empirical normal quantile (NQ) transform as described in
# `Bogner et al (2012)`_.
#
# .. _`Bogner et al (2012)`: http://dx.doi.org/10.5194/hess-16-1085-2012
# Empirical normal quantile transform (maps ranks onto a Gaussian)
R_, _ = transformation.NQ_transform(R, metadata)
data.append((R_ - np.mean(R_)) / np.std(R_))
labels.append("NQ")
skw.append(skew(R_))
###############################################################################
# By plotting all the results, we can notice first of all the strongly asymmetric
# distribution of the original data (R) and that all transformations manage to
# reduce its skewness. Among these, the Box-Cox transform (using the best parameter
# lambda) and the normal quantile (NQ) transform provide the best correction.
# Despite not producing a perfectly symmetric distribution, the square-root (sqrt)
# transform has the strong advantage of being defined for zeros, too, while all
# other transformations need an arbitrary rule for non-positive values.
plot_distribution(data, labels, skw)
plt.title("Data transforms")
plt.tight_layout()
plt.show()
================================================
FILE: examples/ens_kalman_filter_blended_forecast.py
================================================
# -*- coding: utf-8 -*-
"""
Ensemble-based Blending
=======================
This tutorial demonstrates how to construct a blended rainfall forecast by combining
an ensemble nowcast with an ensemble Numerical Weather Prediction (NWP) forecast.
The method follows the Reduced-Space Ensemble Kalman Filter approach described in
:cite:`Nerini2019MWR`.
The procedure starts from the most recent radar observations. In the **prediction step**,
a stochastic radar extrapolation technique generates short-term forecasts. In the
**correction step**, these forecasts are updated using information from the latest
ensemble NWP run. To make the matrix operations tractable, the Bayesian update is carried
out in the subspace defined by the leading principal components—hence the term *reduced
space*.
The datasets used in this tutorial are provided by the German Weather Service (DWD).
"""
import os
from datetime import datetime, timedelta
import numpy as np
from matplotlib import pyplot as plt
import pysteps
from pysteps import io, rcparams, blending
from pysteps.utils import aggregate_fields_space
from pysteps.visualization import plot_precip_field
import pysteps_nwp_importers
################################################################################
# Read the radar images and the NWP forecast
# ------------------------------------------
#
# First, we import a sequence of 3 images of 5-minute radar composites
# and the corresponding NWP rainfall forecast that was available at that time.
#
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Additionally, the pysteps-nwp-importers plugin needs to be installed, see
# https://github.com/pySTEPS/pysteps-nwp-importers.
# Selected case
date_radar = datetime.strptime("202506041645", "%Y%m%d%H%M")
# The last NWP forecast was issued at 16:00 - the blending tool will be able
# to find the correct lead times itself.
date_nwp = datetime.strptime("202506041600", "%Y%m%d%H%M")
radar_data_source = rcparams.data_sources["dwd"]
nwp_data_source = rcparams.data_sources["dwd_nwp"]
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Unpack the radar data-source settings configured in the pystepsrc file.
root_path = radar_data_source["root_path"]
path_fmt = radar_data_source["path_fmt"]
fn_pattern = radar_data_source["fn_pattern"]
fn_ext = radar_data_source["fn_ext"]
importer_name = radar_data_source["importer"]
importer_kwargs = radar_data_source["importer_kwargs"]
timestep_radar = radar_data_source["timestep"]  # radar time step in minutes
# Find the radar files in the archive
fns = io.find_by_date(
    date_radar,
    root_path,
    path_fmt,
    fn_pattern,
    fn_ext,
    timestep_radar,
    num_prev_files=2,  # two files before date_radar, i.e. three composites in total
)
# Read the radar composites (which are already in mm/h)
importer = io.get_method(importer_name, "importer")
radar_precip, _, radar_metadata = io.read_timeseries(fns, importer, **importer_kwargs)
# Import the NWP data
# Build the full path of the NWP file from the data-source configuration,
# formatting both the folder structure and the file name with the NWP issue time.
filename = os.path.join(
    nwp_data_source["root_path"],
    datetime.strftime(date_nwp, nwp_data_source["path_fmt"]),
    datetime.strftime(date_nwp, nwp_data_source["fn_pattern"])
    + "."
    + nwp_data_source["fn_ext"],
)
nwp_importer = io.get_method("dwd_nwp", "importer")
# Copy the importer kwargs so the rcparams configuration is not mutated in
# place; otherwise re-running this script (or notebook cell) would prepend
# PYSTEPS_DATA_PATH to grid_file_path a second time, producing a broken path.
kwargs = dict(nwp_data_source["importer_kwargs"])
# Resolve grid_file_path relative to PYSTEPS_DATA_PATH
kwargs["grid_file_path"] = os.path.join(
    os.environ["PYSTEPS_DATA_PATH"], kwargs["grid_file_path"]
)
nwp_precip, _, nwp_metadata = nwp_importer(filename, **kwargs)
# We lower the number of ens members to 10 to reduce the memory needs in the
# example here. However, it is advised to have a minimum of 20 members for the
# Reduced-Space Ensemble Kalman filter approach
nwp_precip = nwp_precip[:, 0:10, :].astype("single")
################################################################################
# Pre-processing steps
# --------------------
# Set the zerovalue and precipitation thresholds (these are fixed from DWD)
prec_thr = 0.049
zerovalue = 0.027
# Transform the zerovalue and precipitation thresholds to dBR
log_thr_prec = 10.0 * np.log10(prec_thr)
log_zerovalue = 10.0 * np.log10(zerovalue)
# Reproject the DWD ICON NWP data onto a regular grid.
# Store the ICON cell-center coordinates in the metadata; they are needed by
# the unstructured-to-regular reprojection below.
nwp_metadata["clon"] = nwp_precip["longitude"].values
nwp_metadata["clat"] = nwp_precip["latitude"].values
# We change the time step from the DWD NWP data to 15 min (it is actually 5 min)
# to have a longer forecast horizon available for this example, as pysteps_data
# only contains 1 hour of DWD forecast data (to minimize storage).
nwp_metadata["accutime"] = 15.0
nwp_precip = (
    nwp_precip.values.astype("single") * 3.0
)  # (to account for the change in time step from 5 to 15 min)
# Reproject ID2 data onto a regular grid
nwp_precip_rprj, nwp_metadata_rprj = (
    pysteps_nwp_importers.importer_dwd_nwp.unstructured2regular(
        nwp_precip, nwp_metadata, radar_metadata
    )
)
# Release the unstructured array - it is no longer needed.
nwp_precip = None
# Upscale both the radar and NWP data to a four times as coarse resolution
# (pixel size multiplied by 4) to lower the memory needs (for this example)
radar_precip, radar_metadata = aggregate_fields_space(
    radar_precip, radar_metadata, radar_metadata["xpixelsize"] * 4
)
nwp_precip_rprj, nwp_metadata_rprj = aggregate_fields_space(
    nwp_precip_rprj.astype("single"),
    nwp_metadata_rprj,
    nwp_metadata_rprj["xpixelsize"] * 4,
)
# Make sure the units are in mm/h
converter = pysteps.utils.get_method("mm/h")
radar_precip, radar_metadata = converter(
    radar_precip, radar_metadata
)  # The radar data should already be in mm/h
nwp_precip_rprj, nwp_metadata_rprj = converter(nwp_precip_rprj, nwp_metadata_rprj)
# Threshold the data: set everything below the precipitation threshold to zero
radar_precip[radar_precip < prec_thr] = 0.0
nwp_precip_rprj[nwp_precip_rprj < prec_thr] = 0.0
# Plot the radar rainfall field and the first time step and first ensemble member
# of the NWP forecast.
date_str = datetime.strftime(date_radar, "%Y-%m-%d %H:%M")
plt.figure(figsize=(10, 5))
plt.subplot(121)
plot_precip_field(
    radar_precip[-1, :, :],
    geodata=radar_metadata,
    title=f"Radar observation at {date_str}",
    colorscale="STEPS-NL",
)
plt.subplot(122)
plot_precip_field(
    nwp_precip_rprj[0, 0, :, :],
    geodata=nwp_metadata_rprj,
    title=f"NWP forecast at {date_str}",
    colorscale="STEPS-NL",
)
plt.tight_layout()
plt.show()
# Transform the data to dB: the blending method below operates on
# log-transformed (dBR) rain rates.
transformer = pysteps.utils.get_method("dB")
radar_precip, radar_metadata = transformer(
    radar_precip, radar_metadata, threshold=prec_thr, zerovalue=log_zerovalue
)
nwp_precip_rprj, nwp_metadata_rprj = transformer(
    nwp_precip_rprj, nwp_metadata_rprj, threshold=prec_thr, zerovalue=log_zerovalue
)
###############################################################################
# Determine the velocity fields
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In contrast to the STEPS blending method, no motion field for the NWP fields
# is needed in the ensemble kalman filter blending approach.
# Estimate the motion vector field from the radar composites
oflow_method = pysteps.motion.get_method("lucaskanade")
velocity_radar = oflow_method(radar_precip)
################################################################################
# The blended forecast
# ~~~~~~~~~~~~~~~~~~~~
# Build the chronologically ordered valid times for radar_precip and
# nwp_precip_rprj. The radar timestamps end at the analysis time date_radar;
# the NWP timestamps start at the NWP issue time date_nwp.
n_obs = len(radar_precip)
timestamps_radar = np.array(
    [
        date_radar - (n_obs - 1 - k) * timedelta(minutes=timestep_radar)
        for k in range(n_obs)
    ]
)
nwp_step_min = int(nwp_metadata_rprj["accutime"])
timestamps_nwp = np.array(
    [
        date_nwp + k * timedelta(minutes=nwp_step_min)
        for k in range(nwp_precip_rprj.shape[0])
    ]
)
# Set the combination kwargs for the Reduced-Space Ensemble Kalman Filter.
combination_kwargs = dict(
    n_tapering=0,  # Tapering parameter: controls how many diagonals of the covariance matrix are kept (0 = no tapering)
    non_precip_mask=True,  # Specifies whether the computation should be truncated on grid boxes where at least a minimum number of ens. members forecast precipitation.
    n_ens_prec=1,  # Minimum number of ens. members that forecast precip for the above-mentioned mask.
    lien_criterion=True,  # Specifies whether the Lien criterion should be applied.
    n_lien=5,  # Minimum number of ensemble members that forecast precipitation for the Lien criterion (equals half the ens. members here)
    prob_matching="iterative",  # The type of probability matching used.
    inflation_factor_bg=3.0,  # Inflation factor of the background (NWC) covariance matrix. (this value indicates a faster convergence towards the NWP ensemble)
    inflation_factor_obs=1.0,  # Inflation factor of the observation (NWP) covariance matrix.
    offset_bg=0.0,  # Offset of the background (NWC) covariance matrix.
    offset_obs=0.0,  # Offset of the observation (NWP) covariance matrix.
    nwp_hres_eff=14.0,  # Effective horizontal resolution of the utilized NWP model (in km here).
    sampling_prob_source="ensemble",  # Computation method of the sampling probability for the probability matching. 'ensemble' computes this probability as the ratio between the ensemble differences.
    use_accum_sampling_prob=False,  # Specifies whether the current sampling probability should be used for the probability matching or a probability integrated over the previous forecast time.
)
# Call the PCA EnKF method
blending_method = blending.get_method("pca_enkf")
precip_forecast = blending_method(
    obs_precip=radar_precip,  # Radar data in dBR
    obs_timestamps=timestamps_radar,  # Radar timestamps
    nwp_precip=nwp_precip_rprj,  # NWP in dBR
    nwp_timestamps=timestamps_nwp,  # NWP timestamps
    velocity=velocity_radar,  # Velocity vector field
    forecast_horizon=120,  # Forecast length (horizon) in minutes - only a short forecast horizon due to the limited dataset length stored here.
    issuetime=date_radar,  # Forecast issue time as datetime object
    n_ens_members=10,  # No. of ensemble members
    precip_mask_dilation=1,  # Dilation of precipitation mask in grid boxes
    n_cascade_levels=6,  # No. of cascade levels
    precip_thr=log_thr_prec,  # Precip threshold (in dBR)
    norain_thr=0.0005,  # Minimum fraction of precip needed, otherwise 'zero rainfall'
    num_workers=4,  # No. of parallel threads
    noise_stddev_adj="auto",  # Standard deviation adjustment
    noise_method="ssft",  # SSFT as noise method
    enable_combination=True,  # Enable combination
    noise_kwargs={"win_size": (512, 512), "win_fun": "hann", "overlap": 0.5},
    extrap_kwargs={"interp_order": 3, "map_coordinates_mode": "nearest"},
    combination_kwargs=combination_kwargs,
    filter_kwargs={"include_mean": True},
)
# Transform the data back into mm/h (the converter inverts the dB transform
# recorded in the metadata dictionaries).
precip_forecast, _ = converter(precip_forecast, radar_metadata)
radar_precip, _ = converter(radar_precip, radar_metadata)
nwp_precip, _ = converter(nwp_precip_rprj, nwp_metadata_rprj)
################################################################################
# Visualize the output
# ~~~~~~~~~~~~~~~~~~~~
#
# The NWP rainfall forecast has a much lower weight than the radar-based
# extrapolation forecast at the issue time of the forecast (+0 min). Therefore,
# the first time steps consist mostly of the extrapolation. However, near the end
# of the forecast (+120 min), the NWP share in the blended forecast has become
# the more dominant contribution to the forecast and thus the forecast starts
# to resemble the NWP forecast.
fig = plt.figure(figsize=(5, 12))
leadtimes_min = [15, 30, 45, 60, 90, 120]
n_leadtimes = len(leadtimes_min)
for n, leadtime in enumerate(leadtimes_min):
    # Nowcast with blending into NWP (first ensemble member)
    plt.subplot(n_leadtimes, 2, n * 2 + 1)
    plot_precip_field(
        precip_forecast[0, int(leadtime / timestep_radar) - 1, :, :],
        geodata=radar_metadata,
        title=f"Blended +{leadtime} min",
        axis="off",
        colorscale="STEPS-NL",
        colorbar=False,
    )
    # Raw NWP forecast (first ensemble member)
    plt.subplot(n_leadtimes, 2, n * 2 + 2)
    plot_precip_field(
        nwp_precip[int(leadtime / int(nwp_metadata_rprj["accutime"])) - 1, 0, :, :],
        geodata=nwp_metadata_rprj,
        title=f"NWP +{leadtime} min",
        axis="off",
        colorscale="STEPS-NL",
        colorbar=False,
    )
################################################################################
# References
# ~~~~~~~~~~
#
# Nerini, D., Foresti, L., Leuenberger, D., Robert, S., Germann, U. 2019. "A
# Reduced-Space Ensemble Kalman Filter Approach for Flow-Dependent Integration
# of Radar Extrapolation Nowcasts and NWP Precipitation Ensembles." Monthly
# Weather Review 147(3): 987-1006. https://doi.org/10.1175/MWR-D-18-0258.1.
================================================
FILE: examples/linda_nowcasts.py
================================================
#!/bin/env python
"""
LINDA nowcasts
==============
This example shows how to compute and plot deterministic and ensemble LINDA
nowcasts using Swiss radar data.
"""
from datetime import datetime
import warnings
warnings.simplefilter("ignore")
import matplotlib.pyplot as plt
from pysteps import io, rcparams
from pysteps.motion.lucaskanade import dense_lucaskanade
from pysteps.nowcasts import linda, sprog, steps
from pysteps.utils import conversion, dimension, transformation
from pysteps.visualization import plot_precip_field
###############################################################################
# Read the input rain rate fields
# -------------------------------
date = datetime.strptime("201701311200", "%Y%m%d%H%M")
data_source = "mch"
# Read the data source information from rcparams
datasource_params = rcparams.data_sources[data_source]
# Find the radar files in the archive
fns = io.find_by_date(
    date,
    datasource_params["root_path"],
    datasource_params["path_fmt"],
    datasource_params["fn_pattern"],
    datasource_params["fn_ext"],
    datasource_params["timestep"],
    num_prev_files=2,  # two previous files, i.e. three composites in total
)
# Read the data from the archive
importer = io.get_method(datasource_params["importer"], "importer")
reflectivity, _, metadata = io.read_timeseries(
    fns, importer, **datasource_params["importer_kwargs"]
)
# Convert reflectivity to rain rate
rainrate, metadata = conversion.to_rainrate(reflectivity, metadata)
# Upscale data to 2 km to reduce computation time
rainrate, metadata = dimension.aggregate_fields_space(rainrate, metadata, 2000)
# Plot the most recent rain rate field
plt.figure()
plot_precip_field(rainrate[-1, :, :])
plt.show()
###############################################################################
# Estimate the advection field
# ----------------------------
# The advection field is estimated using the Lucas-Kanade optical flow
advection = dense_lucaskanade(rainrate, verbose=True)
###############################################################################
# Deterministic nowcast
# ---------------------
# Compute 30-minute LINDA nowcast (6 timesteps) with 8 parallel workers
# Restrict the number of features to 15 to reduce computation time
nowcast_linda = linda.forecast(
    rainrate,
    advection,
    6,
    max_num_features=15,
    add_perturbations=False,
    num_workers=8,
    measure_time=True,
)[0]
# Compute S-PROG nowcast for comparison
# (S-PROG operates on dB-transformed rain rates)
rainrate_db, _ = transformation.dB_transform(
    rainrate, metadata, threshold=0.1, zerovalue=-15.0
)
nowcast_sprog = sprog.forecast(
    rainrate_db[-3:, :, :],
    advection,
    6,
    n_cascade_levels=6,
    precip_thr=-10.0,
)
# Convert the dBR nowcast back to rain rate (mm/h)
nowcast_sprog = transformation.dB_transform(
    nowcast_sprog, threshold=-10.0, inverse=True
)[0]
# Plot the nowcasts side by side at the final lead time
fig = plt.figure(figsize=(9, 4))
ax = fig.add_subplot(1, 2, 1)
plot_precip_field(
    nowcast_linda[-1, :, :],
    title="LINDA (+ 30 min)",
)
ax = fig.add_subplot(1, 2, 2)
plot_precip_field(
    nowcast_sprog[-1, :, :],
    title="S-PROG (+ 30 min)",
)
plt.show()
###############################################################################
# The above figure shows that the filtering scheme implemented in LINDA preserves
# small-scale and band-shaped features better than S-PROG. This is because the
# former uses a localized elliptical convolution kernel instead of the
# cascade-based autoregressive process, where the parameters are estimated over
# the whole domain.
###############################################################################
# Probabilistic nowcast
# ---------------------
# Compute 30-minute LINDA nowcast ensemble with 40 members and 8 parallel workers
nowcast_linda = linda.forecast(
    rainrate,
    advection,
    6,
    max_num_features=15,
    add_perturbations=True,
    vel_pert_method=None,  # velocity perturbations disabled
    n_ens_members=40,
    num_workers=8,
    measure_time=True,
)[0]
# Compute 40-member STEPS nowcast for comparison
nowcast_steps = steps.forecast(
    rainrate_db[-3:, :, :],
    advection,
    6,
    40,
    n_cascade_levels=6,
    precip_thr=-10.0,
    mask_method="incremental",
    kmperpixel=2.0,
    timestep=datasource_params["timestep"],
    vel_pert_method=None,  # velocity perturbations disabled
)
# Convert the dBR nowcast back to rain rate (mm/h)
nowcast_steps = transformation.dB_transform(
    nowcast_steps, threshold=-10.0, inverse=True
)[0]
# Show the first two ensemble members of each nowcast: LINDA on the top row,
# STEPS on the bottom row.
fig = plt.figure()
for row, (label, ensemble) in enumerate(
    [("LINDA", nowcast_linda), ("STEPS", nowcast_steps)]
):
    for member in range(2):
        panel = fig.add_subplot(2, 2, 2 * row + member + 1)
        panel = plot_precip_field(
            ensemble[member, -1, :, :], geodata=metadata, colorbar=False, axis="off"
        )
        panel.set_title(f"{label} Member {member+1}")
###############################################################################
# The above figure shows the main difference between LINDA and STEPS. In
# addition to the convolution kernel, another improvement in LINDA is a
# localized perturbation generator using the short-space Fourier transform
# (SSFT) and a spatially variable marginal distribution. As a result, the
# LINDA ensemble members preserve the anisotropic and small-scale structures
# considerably better than STEPS.
plt.tight_layout()
plt.show()
================================================
FILE: examples/my_first_nowcast.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "L_dntwSQBnbK"
},
"source": [
"[](https://colab.research.google.com/github/pySTEPS/pysteps/blob/master/examples/my_first_nowcast.ipynb)\n",
"\n",
"# My first precipitation nowcast\n",
"\n",
"In this example, we will use pysteps to compute and plot an extrapolation nowcast using the NSSL's Multi-Radar/Multi-Sensor System\n",
"([MRMS](https://www.nssl.noaa.gov/projects/mrms/)) rain rate product.\n",
"\n",
"The MRMS precipitation product is available every 2 minutes, over the contiguous US. \n",
"Each precipitation composite has 3500 x 7000 grid points, separated 1 km from each other.\n",
"\n",
"## Set-up Colab environment\n",
"\n",
    "**Important**: In colab, execute this section one cell at a time. Trying to execute all the cells at once may result in cells being skipped and some dependencies not being installed.\n",
"\n",
"First, let's set up our working environment. Note that these steps are only needed to work with google colab. \n",
"\n",
"To install pysteps locally, you can follow [these instructions](https://pysteps.readthedocs.io/en/latest/user_guide/install_pysteps.html).\n",
"\n",
"First, let's install the latest Pysteps version from the Python Package Index (PyPI) using pip. This will also install the minimal dependencies needed to run pysteps. \n",
"\n",
"#### Install optional dependencies\n",
"\n",
    "Now, let's install the optional dependencies that will allow us to plot and read the example data.\n",
"- pygrib: to read the MRMS data grib format\n",
"- pyproj: needed by pygrib\n",
"\n",
"**NOTE:** Do not import pysteps in this notebook until the following optional dependencies are loaded. Otherwise, pysteps will assume that they are not installed and some of its functionalities won't work."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "mFx4hq_DBtp-"
},
"outputs": [],
"source": [
"# These libraries are needed for the pygrib library in Colab.\n",
    "# Note that this is needed if you install pygrib using pip.\n",
"# If you use conda, the libraries will be installed automatically.\n",
"! apt-get install libeccodes-dev libproj-dev\n",
"\n",
"# Install the python packages\n",
"! pip install pyproj\n",
"! pip install pygrib"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "6BF2paxnTuGB"
},
"outputs": [],
"source": [
"# Uninstall existing shapely\n",
"# We will re-install shapely in the next step by ignoring the binary\n",
"# wheels to make it compatible with other modules that depend on\n",
"# GEOS, such as Cartopy (used here).\n",
"!pip uninstall --yes shapely"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "7x8Hx_4hE_BU"
},
"outputs": [],
"source": [
"# To install cartopy in Colab using pip, we need to install the library\n",
"# dependencies first.\n",
"\n",
"!apt-get install -qq libgdal-dev libgeos-dev\n",
"!pip install shapely --no-binary shapely\n",
"!pip install cartopy"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "ybD55ZJhmdYa"
},
"source": [
"#### Install pysteps\n",
"\n",
"Now that all dependencies are installed, we can install pysteps."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "VA7zp3nRmhfF"
},
"outputs": [],
"source": [
"# ! pip install git+https://github.com/pySTEPS/pysteps\n",
"! pip install pysteps"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "-AkfR6JSBujn"
},
"source": [
"## Getting the example data\n",
"\n",
"Now that we have the environment ready, let's install the example data and configure the pysteps's default parameters by following [this tutorial](https://pysteps.readthedocs.io/en/latest/user_guide/example_data.html).\n",
"\n",
"First, we will use the [pysteps.datasets.download_pysteps_data()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.download_pysteps_data.html) function to download the data.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "vri-R_ZVGihj"
},
"outputs": [],
"source": [
"# Import the helper functions\n",
"from pysteps.datasets import download_pysteps_data, create_default_pystepsrc\n",
"\n",
"# Download the pysteps data in the \"pysteps_data\"\n",
"download_pysteps_data(\"pysteps_data\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "wdKfjliCKXhx"
},
"source": [
"Next, we need to create a default configuration file that points to the downloaded data. \n",
"By default, pysteps will place the configuration file in `$HOME/.pysteps` (unix and Mac OS X) or `$USERPROFILE/pysteps` (windows).\n",
"To quickly create a configuration file, we will use the [pysteps.datasets.create_default_pystepsrc()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.create_default_pystepsrc.html#pysteps.datasets.create_default_pystepsrc) helper function."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "pGdKHa36H5JX"
},
"outputs": [],
"source": [
"# If the configuration file is placed in one of the default locations\n",
"# (https://pysteps.readthedocs.io/en/latest/user_guide/set_pystepsrc.html#configuration-file-lookup)\n",
"# it will be loaded automatically when pysteps is imported.\n",
"config_file_path = create_default_pystepsrc(\"pysteps_data\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "DAFUJgR5K1CS"
},
"source": [
"Since pysteps was already initialized in this notebook, we need to load the new configuration file and update the default configuration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "tMIbQLPAK42h"
},
"outputs": [],
"source": [
"# Import pysteps and load the new configuration file\n",
"import pysteps\n",
"\n",
"_ = pysteps.load_config_file(config_file_path, verbose=True)"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "SzSqp1DFJ0M9"
},
"source": [
"Let's see what the default parameters look like (these are stored in the\n",
"[pystepsrc file](https://pysteps.readthedocs.io/en/latest/user_guide/set_pystepsrc.html)). We will be using them to load the MRMS data set."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "6Gr65nH4BnbP"
},
"outputs": [],
"source": [
"# The default parameters are stored in pysteps.rcparams.\n",
"from pprint import pprint\n",
"\n",
"pprint(pysteps.rcparams.data_sources[\"mrms\"])"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "9M_buv7WBnbf"
},
"source": [
"This should have printed the following lines:\n",
"\n",
"- `fn_ext`: 'grib2' -- The file extension\n",
"- `fn_pattern`: 'PrecipRate_00.00_%Y%m%d-%H%M%S' -- The file naming convention of the MRMS data.\n",
"- `importer`: 'mrms_grib' -- The name of the importer for the MRMS data.\n",
"- `importer_kwargs`: {} -- Extra options provided to the importer. None in this example.\n",
"- `path_fmt`: '%Y/%m/%d' -- The folder structure in which the files are stored. Here, year/month/day/filename.\n",
"- `root_path`: '/content/pysteps_data/mrms' -- The root path of the MRMS-data.\n",
"- `timestep`: 2 -- The temporal interval of the (radar) rainfall data\n",
"\n",
"Note that the default `timestep` parameter is 2 minutes, which corresponds to the time interval at which the MRMS product is available.\n",
"\n",
"## Load the MRMS example data\n",
"\n",
"Now that we have installed the example data, let's import the example MRMS dataset using the [load_dataset()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.load_dataset.html) helper function from the `pysteps.datasets` module.\n",
"\n",
"We import 1 hour and 10 minutes of data, which corresponds to a sequence of 35 frames of 2-D precipitation composites.\n",
"Note that importing the data takes approximately 30 seconds."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "-8Q4e58VBnbl"
},
"outputs": [],
"source": [
"from pysteps.datasets import load_dataset\n",
"\n",
"# We'll import the time module to measure the time the importer needed\n",
"import time\n",
"\n",
"start_time = time.time()\n",
"\n",
"# Import the data\n",
"precipitation, metadata, timestep = load_dataset(\n",
" \"mrms\", frames=35\n",
") # precipitation in mm/h\n",
"\n",
"end_time = time.time()\n",
"\n",
"print(\"Precipitation data imported\")\n",
"print(\"Importing the data took \", (end_time - start_time), \" seconds\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "btiTxYYMBnby"
},
"source": [
"Let's have a look at the values returned by the `load_dataset()` function. \n",
"\n",
"- `precipitation`: A numpy array with (time, latitude, longitude) dimensions.\n",
"- `metadata`: A dictionary with additional information (pixel sizes, map projections, etc.).\n",
"- `timestep`: Time separation between each sample (in minutes)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "WqUHbJ_qBnb3"
},
"outputs": [],
"source": [
"# Let's inspect the shape of the imported data array\n",
"precipitation.shape"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "xa8woT0ABncD"
},
"source": [
"Note that the shape of the precipitation is 4 times smaller than the raw MRMS data (3500 x 7000).\n",
"The `load_dataset()` function uses the default parameters from `importers` to read the data. By default, the MRMS importer upscales the data 4x. That is, from ~1km resolution to ~4km. It also uses single precision to reduce the memory requirements.\n",
"Thanks to the upscaling, the memory footprint of this example dataset is ~200Mb instead of the 3.1Gb of the raw (3500 x 7000) data. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "22O2YXrfBncG"
},
"outputs": [],
"source": [
"timestep # In minutes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "J8_4hwcXBncT"
},
"outputs": [],
"source": [
"pprint(metadata)"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "uQREORtJBnch"
},
"source": [
"# Time to make a nowcast\n",
"\n",
"So far, we have 1 hour and 10 minutes of precipitation images, separated 2 minutes from each other.\n",
"But, how do we use that data to run a precipitation forecast? \n",
"\n",
"A simple way is by extrapolating the precipitation field, assuming it will continue to move as observed in the recent past, and without changes in intensity. This is commonly known as *Lagrangian persistence*.\n",
"\n",
"The first step to run our nowcast based on Lagrangian persistence, is the estimation of the motion field from a sequence of past precipitation observations.\n",
"We use the Lucas-Kanade (LK) optical flow method implemented in pysteps.\n",
"This method follows a local tracking approach that relies on the OpenCV package.\n",
"Local features are tracked in a sequence of two or more radar images.\n",
"The scheme includes a final interpolation step to produce a smooth field of motion vectors.\n",
"Other optical flow methods are also available in pysteps. \n",
"Check the full list [here](https://pysteps.readthedocs.io/en/latest/pysteps_reference/motion.html).\n",
"\n",
"Now let's use the first 5 precipitation images (10 min) to estimate the motion field of the radar pattern and the remaining 30 images (1h) to evaluate the quality of our forecast."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "jcb2Sf6xBnck"
},
"outputs": [],
"source": [
"# precipitation[0:5] -> Used to find motion (past data). Let's call it training precip.\n",
"train_precip = precipitation[0:5]\n",
"\n",
"# precipitation[5:] -> Used to evaluate forecasts (future data, not available in \"real\" forecast situation)\n",
"# Let's call it observed precipitation because we will use it to compare our forecast with the actual observations.\n",
    "observed_precip = precipitation[5:]"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "xt1TbB0RBncu"
},
"source": [
"Let's see what this 'training' precipitation event looks like using the [pysteps.visualization.plot_precip_field](https://pysteps.readthedocs.io/en/latest/generated/pysteps.visualization.precipfields.plot_precip_field.html) function."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "bmNYLo1jBncw"
},
"outputs": [],
"source": [
"from matplotlib import pyplot as plt\n",
"from pysteps.visualization import plot_precip_field\n",
"\n",
"# Set a figure size that looks nice ;)\n",
"plt.figure(figsize=(9, 5), dpi=100)\n",
"\n",
"# Plot the last rainfall field in the \"training\" data.\n",
"# train_precip[-1] -> Last available composite for nowcasting.\n",
"plot_precip_field(train_precip[-1], geodata=metadata, axis=\"off\")\n",
"plt.show() # (This line is actually not needed if you are using jupyter notebooks)"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "NVRfJm11Bnc7"
},
"source": [
    "Did you note the **shaded grey** regions? Those are the regions where no valid observations were available to estimate the precipitation (e.g., due to ground clutter, no radar coverage, or radar beam blockage).\n",
"Those regions need to be handled with care when we run our nowcast.\n",
"\n",
"### Data exploration\n",
"\n",
"Before we produce a forecast, let's explore the precipitation data. In particular, let's see how the distribution of the rain rate values looks."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "WER6RttPBnc9"
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# Let's define some plotting default parameters for the next plots\n",
"# Note: This is not strictly needed.\n",
"plt.rc(\"figure\", figsize=(4, 4))\n",
"plt.rc(\"figure\", dpi=100)\n",
"plt.rc(\"font\", size=14) # controls default text sizes\n",
"plt.rc(\"axes\", titlesize=14) # fontsize of the axes title\n",
"plt.rc(\"axes\", labelsize=14) # fontsize of the x and y labels\n",
"plt.rc(\"xtick\", labelsize=14) # fontsize of the tick labels\n",
"plt.rc(\"ytick\", labelsize=14) # fontsize of the tick labels\n",
"\n",
"# Let's use the last available composite for nowcasting from the \"training\" data (train_precip[-1])\n",
"# Also, we will discard any invalid value.\n",
"valid_precip_values = train_precip[-1][~np.isnan(train_precip[-1])]\n",
"\n",
"# Plot the histogram\n",
"bins = np.concatenate(([-0.01, 0.01], np.linspace(1, 40, 39)))\n",
"plt.hist(valid_precip_values, bins=bins, log=True, edgecolor=\"black\")\n",
"plt.autoscale(tight=True, axis=\"x\")\n",
"plt.xlabel(\"Rainfall intensity [mm/h]\")\n",
"plt.ylabel(\"Counts\")\n",
"plt.title(\"Precipitation rain rate histogram in mm/h units\")\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "O6TvIXS3BndH"
},
"source": [
"The histogram shows that rain rate values have a non-Gaussian and asymmetric distribution that is bounded at zero. Also, the probability of occurrence decays extremely fast with increasing rain rate values (note the logarithmic y-axis).\n",
"\n",
"\n",
"For better performance of the motion estimation algorithms, we can convert the rain rate values (in mm/h) to a more log-normal distribution of rain rates by applying the following logarithmic transformation:\n",
"\n",
"\\begin{equation}\n",
"R\\rightarrow\n",
"\\begin{cases}\n",
" 10\\log_{10}R, & \\text{if } R\\geq 0.1\\text{mm h$^{-1}$} \\\\\n",
" -15, & \\text{otherwise}\n",
"\\end{cases}\n",
"\\end{equation}\n",
"\n",
"The transformed precipitation corresponds to logarithmic rain rates in units of dBR. The value of −15 dBR is equivalent to assigning a rain rate of approximately 0.03 mm h$^{−1}$ to the zeros. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "hgA4PeapBndK"
},
"outputs": [],
"source": [
"from pysteps.utils import transformation\n",
"\n",
"# Log-transform the data to dBR.\n",
"# The threshold of 0.1 mm/h sets the fill value to -15 dBR.\n",
"train_precip_dbr, metadata_dbr = transformation.dB_transform(\n",
" train_precip, metadata, threshold=0.1, zerovalue=-15.0\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "Nx3VESBlBndU"
},
"source": [
"Let's inspect the resulting **transformed precipitation** distribution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "rYS5hBIGBndX"
},
"outputs": [],
"source": [
"# Only use the valid data!\n",
"valid_precip_dbr = train_precip_dbr[-1][~np.isnan(train_precip_dbr[-1])]\n",
"\n",
"plt.figure(figsize=(4, 4), dpi=100)\n",
"\n",
"# Plot the histogram\n",
"counts, bins, _ = plt.hist(valid_precip_dbr, bins=40, log=True, edgecolor=\"black\")\n",
"plt.autoscale(tight=True, axis=\"x\")\n",
"plt.xlabel(\"Rainfall intensity [dB]\")\n",
"plt.ylabel(\"Counts\")\n",
"plt.title(\"Precipitation rain rate histogram in dB units\")\n",
"\n",
"# Let's add a lognormal distribution that fits that data to the plot.\n",
"import scipy\n",
"\n",
"bin_center = (bins[1:] + bins[:-1]) * 0.5\n",
"bin_width = np.diff(bins)\n",
"\n",
"# We will only use one composite to fit the function to speed up things.\n",
"# First, remove the no precip areas.\n",
"precip_to_fit = valid_precip_dbr[valid_precip_dbr > -15]\n",
"\n",
"fit_params = scipy.stats.lognorm.fit(precip_to_fit)\n",
"\n",
"fitted_pdf = scipy.stats.lognorm.pdf(bin_center, *fit_params)\n",
"\n",
"# Multiply pdf by the bin width and the total number of grid points: pdf -> total counts per bin.\n",
"fitted_pdf = fitted_pdf * bin_width * precip_to_fit.size\n",
"\n",
"# Plot the log-normal fit\n",
"plt.plot(bin_center, fitted_pdf, label=\"Fitted log-normal\")\n",
"plt.legend()\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "ZocO5zqUBndg"
},
"source": [
"That looks more like a log-normal distribution. Note the large peak at -15 dB. That peak corresponds to \"zero\" (below threshold) precipitation. The jump with no data in between -15 and -10 dB is caused by the precision of the data, which we had set to 1 decimal. Hence, the lowest precipitation intensities (above zero) are 0.1 mm/h (= -10 dB).\n",
"\n",
"## Compute the nowcast\n",
"\n",
"These are the minimal steps to compute a short-term forecast using Lagrangian extrapolation of the precipitation patterns:\n",
" \n",
" 1. Estimate the precipitation motion field.\n",
" 1. Use the motion field to advect the most recent radar rainfall field and produce an extrapolation forecast.\n",
"\n",
"### Estimate the motion field\n",
"\n",
"Now we can estimate the motion field. Here we use a local feature-tracking approach (Lucas-Kanade).\n",
"However, check the other methods available in the [pysteps.motion](https://pysteps.readthedocs.io/en/latest/pysteps_reference/motion.html) module."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "mnACmX_0Bndi"
},
"outputs": [],
"source": [
"# Estimate the motion field with Lucas-Kanade\n",
"from pysteps import motion\n",
"from pysteps.visualization import plot_precip_field, quiver\n",
"\n",
"# Import the Lucas-Kanade optical flow algorithm\n",
"oflow_method = motion.get_method(\"LK\")\n",
"\n",
"# Estimate the motion field from the training data (in dBR)\n",
"motion_field = oflow_method(train_precip_dbr)\n",
"\n",
"## Plot the motion field.\n",
"# Use a figure size that looks nice ;)\n",
"plt.figure(figsize=(9, 5), dpi=100)\n",
"plt.title(\"Estimated motion field with the Lucas-Kanade algorithm\")\n",
"\n",
"# Plot the last rainfall field in the \"training\" data.\n",
"# Remember to use the mm/h precipitation data since plot_precip_field assumes\n",
"# mm/h by default. You can change this behavior using the \"units\" keyword.\n",
"plot_precip_field(train_precip[-1], geodata=metadata, axis=\"off\")\n",
"\n",
"# Plot the motion field vectors\n",
"quiver(motion_field, geodata=metadata, step=40)\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "YObddRFCBnd1"
},
"source": [
"### Extrapolate the observations\n",
"\n",
"We have all ingredients to make an extrapolation nowcast now. \n",
"The final step is to advect the most recent radar rainfall field along the estimated motion field, producing an extrapolation forecast."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "erSLAzvNBnd3"
},
"outputs": [],
"source": [
"from pysteps import nowcasts\n",
"\n",
"start = time.time()\n",
"\n",
"# Extrapolate the last radar observation\n",
"extrapolate = nowcasts.get_method(\"extrapolation\")\n",
"\n",
"# You can use the precipitation observations directly in mm/h for this step.\n",
"last_observation = train_precip[-1]\n",
"\n",
"last_observation[~np.isfinite(last_observation)] = metadata[\"zerovalue\"]\n",
"\n",
"# We set the number of leadtimes (the length of the forecast horizon) to the\n",
"# length of the observed/verification precipitation data. In this way, we'll get\n",
"# a forecast that covers these time intervals.\n",
"n_leadtimes = observed_precip.shape[0]\n",
"\n",
"# Advect the most recent radar rainfall field and make the nowcast.\n",
"precip_forecast = extrapolate(train_precip[-1], motion_field, n_leadtimes)\n",
"\n",
"# This shows the shape of the resulting array with [time intervals, rows, cols]\n",
"print(\"The shape of the resulting array is: \", precip_forecast.shape)\n",
"\n",
"end = time.time()\n",
"print(\"Advecting the radar rainfall fields took \", (end - start), \" seconds\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "csy5s-yRBneB"
},
"source": [
"Let's inspect the last forecast time (hence this is the forecast rainfall an hour ahead)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "MUiS5-HPBneD"
},
"outputs": [],
"source": [
"# Plot precipitation at the end of the forecast period.\n",
"plt.figure(figsize=(9, 5), dpi=100)\n",
"plot_precip_field(precip_forecast[-1], geodata=metadata, axis=\"off\")\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "mQEseXvhBneI"
},
"source": [
"## Evaluate the forecast quality\n",
"\n",
"Many verification methods are already present in pysteps (see a complete list [here](https://pysteps.readthedocs.io/en/latest/pysteps_reference/verification.html)). We just have to import them. \n",
"\n",
"Here, we will evaluate our forecast using the Fractions Skill Score (FSS). \n",
"This metric provides an intuitive assessment of the dependency of forecast skill on spatial scale and intensity. This makes the FSS an ideal skill score for high-resolution precipitation forecasts.\n",
"\n",
"More precisely, the FSS is a neighborhood spatial verification method that directly compares the fractional coverage of events in windows surrounding the observations and forecasts.\n",
"The FSS varies from 0 (total mismatch) to 1 (perfect forecast).\n",
"For
gitextract_7wafhhns/ ├── .github/ │ └── workflows/ │ ├── check_black.yml │ ├── python-publish.yml │ └── test_pysteps.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── CITATION.bib ├── CONTRIBUTING.rst ├── LICENSE ├── MANIFEST.in ├── PKG-INFO ├── README.rst ├── ci/ │ ├── ci_test_env.yml │ ├── fetch_pysteps_data.py │ └── test_plugin_support.py ├── doc/ │ ├── .gitignore │ ├── Makefile │ ├── _static/ │ │ └── pysteps.css │ ├── _templates/ │ │ └── layout.html │ ├── make.bat │ ├── rebuild_docs.sh │ ├── requirements.txt │ └── source/ │ ├── conf.py │ ├── developer_guide/ │ │ ├── build_the_docs.rst │ │ ├── contributors_guidelines.rst │ │ ├── importer_plugins.rst │ │ ├── pypi.rst │ │ ├── test_pysteps.rst │ │ └── update_conda_forge.rst │ ├── index.rst │ ├── pysteps_reference/ │ │ ├── blending.rst │ │ ├── cascade.rst │ │ ├── datasets.rst │ │ ├── decorators.rst │ │ ├── downscaling.rst │ │ ├── extrapolation.rst │ │ ├── feature.rst │ │ ├── index.rst │ │ ├── io.rst │ │ ├── motion.rst │ │ ├── noise.rst │ │ ├── nowcasts.rst │ │ ├── postprocessing.rst │ │ ├── pysteps.rst │ │ ├── timeseries.rst │ │ ├── tracking.rst │ │ ├── utils.rst │ │ ├── verification.rst │ │ └── visualization.rst │ ├── references.bib │ ├── user_guide/ │ │ ├── example_data.rst │ │ ├── install_pysteps.rst │ │ ├── machine_learning_pysteps.rst │ │ ├── pystepsrc_example.rst │ │ └── set_pystepsrc.rst │ └── zz_bibliography.rst ├── environment.yml ├── environment_dev.yml ├── examples/ │ ├── LK_buffer_mask.py │ ├── README.txt │ ├── advection_correction.py │ ├── anvil_nowcast.py │ ├── data_transformations.py │ ├── ens_kalman_filter_blended_forecast.py │ ├── linda_nowcasts.py │ ├── my_first_nowcast.ipynb │ ├── optical_flow_methods_convergence.py │ ├── plot_cascade_decomposition.py │ ├── plot_custom_precipitation_range.py │ ├── plot_ensemble_verification.py │ ├── plot_extrapolation_nowcast.py │ ├── plot_linear_blending.py │ ├── plot_noise_generators.py │ ├── plot_optical_flow.py │ ├── plot_steps_nowcast.py 
│ ├── probability_forecast.py │ ├── rainfarm_downscale.py │ ├── steps_blended_forecast.py │ └── thunderstorm_detection_and_tracking.py ├── pyproject.toml ├── pysteps/ │ ├── __init__.py │ ├── blending/ │ │ ├── __init__.py │ │ ├── clim.py │ │ ├── ens_kalman_filter_methods.py │ │ ├── interface.py │ │ ├── linear_blending.py │ │ ├── pca_ens_kalman_filter.py │ │ ├── skill_scores.py │ │ ├── steps.py │ │ └── utils.py │ ├── cascade/ │ │ ├── __init__.py │ │ ├── bandpass_filters.py │ │ ├── decomposition.py │ │ └── interface.py │ ├── datasets.py │ ├── decorators.py │ ├── downscaling/ │ │ ├── __init__.py │ │ ├── interface.py │ │ └── rainfarm.py │ ├── exceptions.py │ ├── extrapolation/ │ │ ├── __init__.py │ │ ├── interface.py │ │ └── semilagrangian.py │ ├── feature/ │ │ ├── __init__.py │ │ ├── blob.py │ │ ├── interface.py │ │ ├── shitomasi.py │ │ └── tstorm.py │ ├── io/ │ │ ├── __init__.py │ │ ├── archive.py │ │ ├── exporters.py │ │ ├── importers.py │ │ ├── interface.py │ │ ├── mch_lut_8bit_Metranet_AZC_V104.txt │ │ ├── mch_lut_8bit_Metranet_v103.txt │ │ ├── nowcast_importers.py │ │ └── readers.py │ ├── motion/ │ │ ├── __init__.py │ │ ├── _proesmans.pyx │ │ ├── _vet.pyx │ │ ├── constant.py │ │ ├── darts.py │ │ ├── farneback.py │ │ ├── interface.py │ │ ├── lucaskanade.py │ │ ├── proesmans.py │ │ └── vet.py │ ├── noise/ │ │ ├── __init__.py │ │ ├── fftgenerators.py │ │ ├── interface.py │ │ ├── motion.py │ │ └── utils.py │ ├── nowcasts/ │ │ ├── __init__.py │ │ ├── anvil.py │ │ ├── extrapolation.py │ │ ├── interface.py │ │ ├── lagrangian_probability.py │ │ ├── linda.py │ │ ├── sprog.py │ │ ├── sseps.py │ │ ├── steps.py │ │ └── utils.py │ ├── postprocessing/ │ │ ├── __init__.py │ │ ├── diagnostics.py │ │ ├── ensemblestats.py │ │ ├── interface.py │ │ └── probmatching.py │ ├── pystepsrc │ ├── pystepsrc_schema.json │ ├── scripts/ │ │ ├── __init__.py │ │ ├── fit_vel_pert_params.py │ │ └── run_vel_pert_analysis.py │ ├── tests/ │ │ ├── __init__.py │ │ ├── helpers.py │ │ ├── test_archive.py 
│ │ ├── test_blending_clim.py │ │ ├── test_blending_linear_blending.py │ │ ├── test_blending_pca_ens_kalman_filter.py │ │ ├── test_blending_skill_scores.py │ │ ├── test_blending_steps.py │ │ ├── test_blending_utils.py │ │ ├── test_cascade.py │ │ ├── test_datasets.py │ │ ├── test_decorators.py │ │ ├── test_downscaling_rainfarm.py │ │ ├── test_ensscores.py │ │ ├── test_exporters.py │ │ ├── test_extrapolation_semilagrangian.py │ │ ├── test_feature.py │ │ ├── test_feature_tstorm.py │ │ ├── test_importer_decorator.py │ │ ├── test_interfaces.py │ │ ├── test_io_archive.py │ │ ├── test_io_bom_rf3.py │ │ ├── test_io_dwd_hdf5.py │ │ ├── test_io_fmi_geotiff.py │ │ ├── test_io_fmi_pgm.py │ │ ├── test_io_knmi_hdf5.py │ │ ├── test_io_mch_gif.py │ │ ├── test_io_mrms_grib.py │ │ ├── test_io_nowcast_importers.py │ │ ├── test_io_opera_hdf5.py │ │ ├── test_io_readers.py │ │ ├── test_io_saf_crri.py │ │ ├── test_motion.py │ │ ├── test_motion_farneback.py │ │ ├── test_motion_lk.py │ │ ├── test_noise_fftgenerators.py │ │ ├── test_noise_motion.py │ │ ├── test_nowcasts_anvil.py │ │ ├── test_nowcasts_lagrangian_probability.py │ │ ├── test_nowcasts_linda.py │ │ ├── test_nowcasts_sprog.py │ │ ├── test_nowcasts_sseps.py │ │ ├── test_nowcasts_steps.py │ │ ├── test_nowcasts_utils.py │ │ ├── test_paramsrc.py │ │ ├── test_plt_animate.py │ │ ├── test_plt_cartopy.py │ │ ├── test_plt_motionfields.py │ │ ├── test_plt_precipfields.py │ │ ├── test_plugins_support.py │ │ ├── test_postprocessing_ensemblestats.py │ │ ├── test_postprocessing_probmatching.py │ │ ├── test_timeseries_autoregression.py │ │ ├── test_tracking_tdating.py │ │ ├── test_utils_arrays.py │ │ ├── test_utils_cleansing.py │ │ ├── test_utils_conversion.py │ │ ├── test_utils_dimension.py │ │ ├── test_utils_interpolate.py │ │ ├── test_utils_pca.py │ │ ├── test_utils_reprojection.py │ │ ├── test_utils_spectral.py │ │ ├── test_utils_transformation.py │ │ ├── test_verification_detcatscores.py │ │ ├── test_verification_detcontscores.py │ │ ├── 
test_verification_probscores.py │ │ ├── test_verification_salscores.py │ │ └── test_verification_spatialscores.py │ ├── timeseries/ │ │ ├── __init__.py │ │ ├── autoregression.py │ │ └── correlation.py │ ├── tracking/ │ │ ├── __init__.py │ │ ├── interface.py │ │ ├── lucaskanade.py │ │ └── tdating.py │ ├── utils/ │ │ ├── __init__.py │ │ ├── arrays.py │ │ ├── check_norain.py │ │ ├── cleansing.py │ │ ├── conversion.py │ │ ├── dimension.py │ │ ├── fft.py │ │ ├── images.py │ │ ├── interface.py │ │ ├── interpolate.py │ │ ├── pca.py │ │ ├── reprojection.py │ │ ├── spectral.py │ │ ├── tapering.py │ │ └── transformation.py │ ├── verification/ │ │ ├── __init__.py │ │ ├── detcatscores.py │ │ ├── detcontscores.py │ │ ├── ensscores.py │ │ ├── interface.py │ │ ├── lifetime.py │ │ ├── plots.py │ │ ├── probscores.py │ │ ├── salscores.py │ │ └── spatialscores.py │ └── visualization/ │ ├── __init__.py │ ├── animations.py │ ├── basemaps.py │ ├── motionfields.py │ ├── precipfields.py │ ├── spectral.py │ ├── thunderstorms.py │ └── utils.py ├── requirements.txt ├── requirements_dev.txt ├── setup.py └── tox.ini
SYMBOL INDEX (737 symbols across 161 files)
FILE: doc/source/conf.py
function get_version (line 64) | def get_version():
function set_root (line 105) | def set_root():
FILE: examples/advection_correction.py
function advection_correction (line 87) | def advection_correction(R, T=5, t=1):
FILE: examples/anvil_nowcast.py
function plot_growth_decay_circles (line 134) | def plot_growth_decay_circles(ax):
FILE: examples/data_transformations.py
function plot_distribution (line 76) | def plot_distribution(data, labels, skw):
FILE: examples/optical_flow_methods_convergence.py
function create_motion_field (line 123) | def create_motion_field(input_precip, motion_type):
function create_observations (line 177) | def create_observations(input_precip, motion_type, num_times=9):
function plot_optflow_method_convergence (line 245) | def plot_optflow_method_convergence(input_precip, optflow_method_name, m...
FILE: examples/plot_custom_precipitation_range.py
class ColormapConfig (line 103) | class ColormapConfig:
method __init__ (line 104) | def __init__(self):
method build_colormap (line 111) | def build_colormap(self):
FILE: examples/rainfarm_downscale.py
function read_precipitation_data (line 55) | def read_precipitation_data(file_path):
function upscale_field (line 93) | def upscale_field(precip, metadata, scale_factor):
FILE: pysteps/__init__.py
function _get_config_file_schema (line 29) | def _get_config_file_schema():
function _fconfig_candidates_generator (line 37) | def _fconfig_candidates_generator():
function config_fname (line 76) | def config_fname():
function _decode_filesystem_path (line 110) | def _decode_filesystem_path(path):
class _DotDictify (line 117) | class _DotDictify(dict):
method __setitem__ (line 132) | def __setitem__(self, key, value):
method __getitem__ (line 137) | def __getitem__(self, key):
function load_config_file (line 150) | def load_config_file(params_file=None, verbose=False, dryrun=False):
FILE: pysteps/blending/clim.py
function get_default_skill (line 25) | def get_default_skill(n_cascade_levels=6, n_models=1):
function save_skill (line 59) | def save_skill(
function calc_clim_skill (line 158) | def calc_clim_skill(
FILE: pysteps/blending/ens_kalman_filter_methods.py
class EnsembleKalmanFilter (line 79) | class EnsembleKalmanFilter:
method __init__ (line 81) | def __init__(self, config, params):
method update (line 105) | def update(
method get_covariance_matrix (line 196) | def get_covariance_matrix(
method get_tapering (line 235) | def get_tapering(self, n: int):
method get_precipitation_mask (line 267) | def get_precipitation_mask(self, forecast_array: np.ndarray):
method get_lien_criterion (line 300) | def get_lien_criterion(self, nwc_ensemble: np.ndarray, nwp_ensemble: n...
method get_weighting_for_probability_matching (line 338) | def get_weighting_for_probability_matching(
class MaskedEnKF (line 401) | class MaskedEnKF(EnsembleKalmanFilter):
method __init__ (line 403) | def __init__(self, config, params):
method correct_step (line 451) | def correct_step(
method get_inflation_factor_obs (line 628) | def get_inflation_factor_obs(self):
FILE: pysteps/blending/interface.py
function get_method (line 27) | def get_method(name):
FILE: pysteps/blending/linear_blending.py
function forecast (line 29) | def forecast(
function _get_slice (line 282) | def _get_slice(n_dims, ref_dim, ref_id):
function _get_ranked_salience (line 289) | def _get_ranked_salience(precip_nowcast, precip_nwp):
function _get_ws (line 326) | def _get_ws(weight, ranked_salience):
FILE: pysteps/blending/pca_ens_kalman_filter.py
class EnKFCombinationConfig (line 82) | class EnKFCombinationConfig:
class EnKFCombinationParams (line 226) | class EnKFCombinationParams:
class ForecastInitialization (line 251) | class ForecastInitialization:
method __init__ (line 262) | def __init__(
method __initialize_nowcast_components (line 294) | def __initialize_nowcast_components(self):
method __prepare_radar_data_and_ar_parameters (line 352) | def __prepare_radar_data_and_ar_parameters(self):
method __initialize_noise (line 471) | def __initialize_noise(self):
method __initialize_noise_field_pool (line 528) | def __initialize_noise_field_pool(self):
class ForecastState (line 585) | class ForecastState:
method __init__ (line 590) | def __init__(
class ForecastModel (line 620) | class ForecastModel:
method __init__ (line 625) | def __init__(
method run_forecast_step (line 670) | def run_forecast_step(self, nwp, is_correction_timestep=False):
method backtransform (line 717) | def backtransform(self):
method __decompose (line 730) | def __decompose(self, is_correction_timestep):
method __advect (line 774) | def __advect(self):
method __iterate (line 811) | def __iterate(self):
method __update_precip_mask (line 832) | def __update_precip_mask(self, nwp):
method __probability_matching (line 876) | def __probability_matching(self):
method __set_no_data (line 886) | def __set_no_data(self):
method fill_backtransform (line 892) | def fill_backtransform(self, nwp):
class EnKFCombinationNowcaster (line 923) | class EnKFCombinationNowcaster:
method __init__ (line 924) | def __init__(
method compute_forecast (line 955) | def compute_forecast(self):
method __check_inputs (line 1099) | def __check_inputs(self):
method __check_input_timestamps (line 1202) | def __check_input_timestamps(self):
method __check_no_rain_case (line 1285) | def __check_no_rain_case(self):
method __print_forecast_info (line 1320) | def __print_forecast_info(self):
method __integrated_nowcast_main_loop (line 1376) | def __integrated_nowcast_main_loop(self):
method __forecast_loop (line 1476) | def __forecast_loop(self, t, is_correction_timestep, is_nowcasting_tim...
method __write_output (line 1530) | def __write_output(self):
method __measure_time (line 1540) | def __measure_time(self, label, start_time):
function forecast (line 1555) | def forecast(
FILE: pysteps/blending/skill_scores.py
function spatial_correlation (line 22) | def spatial_correlation(obs, mod, domain_mask):
function lt_dependent_cor_nwp (line 81) | def lt_dependent_cor_nwp(lt, correlations, outdir_path, n_model=0, skill...
function lt_dependent_cor_extrapolation (line 139) | def lt_dependent_cor_extrapolation(PHI, correlations=None, correlations_...
function clim_regr_values (line 187) | def clim_regr_values(n_cascade_levels, outdir_path, n_model=0, skill_kwa...
FILE: pysteps/blending/steps.py
class StepsBlendingConfig (line 75) | class StepsBlendingConfig:
class StepsBlendingParams (line 325) | class StepsBlendingParams:
class StepsBlendingState (line 362) | class StepsBlendingState:
class StepsBlendingNowcaster (line 437) | class StepsBlendingNowcaster:
method __init__ (line 438) | def __init__(
method compute_forecast (line 470) | def compute_forecast(self):
method __blended_nowcast_main_loop (line 613) | def __blended_nowcast_main_loop(self):
method __check_inputs (line 722) | def __check_inputs(self):
method __print_forecast_info (line 917) | def __print_forecast_info(self):
method __initialize_nowcast_components (line 1011) | def __initialize_nowcast_components(self):
method __prepare_radar_and_NWP_fields (line 1058) | def __prepare_radar_and_NWP_fields(self):
method __decompose_member (line 1215) | def __decompose_member(self, member_field):
method __zero_precipitation_forecast (line 1244) | def __zero_precipitation_forecast(self):
method __prepare_nowcast_for_zero_radar (line 1300) | def __prepare_nowcast_for_zero_radar(self):
method __initialize_noise (line 1375) | def __initialize_noise(self):
method __estimate_ar_parameters_radar (line 1433) | def __estimate_ar_parameters_radar(self):
method __multiply_precip_cascade_to_match_ensemble_members (line 1510) | def __multiply_precip_cascade_to_match_ensemble_members(self):
method __initialize_random_generators (line 1525) | def __initialize_random_generators(self):
method __prepare_forecast_loop (line 1579) | def __prepare_forecast_loop(self):
method __initialize_noise_cascades (line 1640) | def __initialize_noise_cascades(self):
method __determine_subtimesteps_and_nowcast_time_step (line 1683) | def __determine_subtimesteps_and_nowcast_time_step(self, t, subtimeste...
method __decompose_nwp_if_needed_and_fill_nans_in_nwp (line 1709) | def __decompose_nwp_if_needed_and_fill_nans_in_nwp(self, t):
method __find_nowcast_NWP_combination (line 1781) | def __find_nowcast_NWP_combination(self, t):
method __determine_skill_for_current_timestep (line 1985) | def __determine_skill_for_current_timestep(self, t):
method __determine_NWP_skill_for_next_timestep (line 2042) | def __determine_NWP_skill_for_next_timestep(self, t, j, worker_state):
method __determine_weights_per_component (line 2079) | def __determine_weights_per_component(self, t, worker_state):
method __regress_extrapolation_and_noise_cascades (line 2177) | def __regress_extrapolation_and_noise_cascades(self, j, worker_state, t):
method __perturb_blend_and_advect_extrapolation_and_noise_to_current_timestep (line 2264) | def __perturb_blend_and_advect_extrapolation_and_noise_to_current_time...
method __blend_cascades (line 2679) | def __blend_cascades(self, t_sub, j, worker_state):
method __recompose_cascade_to_rainfall_field (line 2935) | def __recompose_cascade_to_rainfall_field(self, j, worker_state):
method __post_process_output (line 2968) | def __post_process_output(
method __measure_time (line 3265) | def __measure_time(self, label, start_time):
function forecast (line 3280) | def forecast(
function calculate_ratios (line 3690) | def calculate_ratios(correlations):
function calculate_weights_bps (line 3715) | def calculate_weights_bps(correlations):
function calculate_weights_spn (line 3776) | def calculate_weights_spn(correlations, covariance):
function calculate_end_weights (line 3858) | def calculate_end_weights(
function blend_means_sigmas (line 3964) | def blend_means_sigmas(means, sigmas, weights):
FILE: pysteps/blending/utils.py
function stack_cascades (line 48) | def stack_cascades(R_d, donorm=True):
function blend_cascades (line 88) | def blend_cascades(cascades_norm, weights):
function recompose_cascade (line 149) | def recompose_cascade(combined_cascade, combined_mean, combined_sigma):
function blend_optical_flows (line 185) | def blend_optical_flows(flows, weights):
function decompose_NWP (line 243) | def decompose_NWP(
function compute_store_nwp_motion (line 393) | def compute_store_nwp_motion(
function load_NWP (line 443) | def load_NWP(input_nc_path_decomp, input_path_velocities, start_time, n_...
function check_norain (line 536) | def check_norain(precip_arr, precip_thr=None, norain_thr=0.0):
function compute_smooth_dilated_mask (line 561) | def compute_smooth_dilated_mask(
FILE: pysteps/cascade/bandpass_filters.py
function filter_uniform (line 53) | def filter_uniform(shape, n):
function filter_gaussian (line 91) | def filter_gaussian(
function _gaussweights_1d (line 209) | def _gaussweights_1d(l, n, gauss_scale=0.5):
FILE: pysteps/cascade/decomposition.py
function decomposition_fft (line 77) | def decomposition_fft(field, bp_filter, **kwargs):
function recompose_fft (line 264) | def recompose_fft(decomp, **kwargs):
FILE: pysteps/cascade/interface.py
function get_method (line 21) | def get_method(name):
FILE: pysteps/datasets.py
function _decode_filesystem_path (line 62) | def _decode_filesystem_path(path):
function info (line 69) | def info():
class ShowProgress (line 90) | class ShowProgress(object):
method __init__ (line 102) | def __init__(self, bar_length=20):
method _clear_line (line 108) | def _clear_line(self):
method _print (line 112) | def _print(self, msg):
method __call__ (line 116) | def __call__(self, count, block_size, total_size, exact=True):
method end (line 160) | def end(message="Download complete"):
function download_mrms_data (line 164) | def download_mrms_data(dir_path, initial_date, final_date, timestep=2, n...
function download_pysteps_data (line 286) | def download_pysteps_data(dir_path, force=True):
function create_default_pystepsrc (line 337) | def create_default_pystepsrc(
function load_dataset (line 409) | def load_dataset(case="fmi", frames=14):
FILE: pysteps/decorators.py
function _add_extra_kwrds_to_docstrings (line 27) | def _add_extra_kwrds_to_docstrings(target_func, extra_kwargs_doc_text):
function postprocess_import (line 44) | def postprocess_import(fillna=np.nan, dtype="double"):
function check_input_frames (line 112) | def check_input_frames(
function prepare_interpolator (line 153) | def prepare_interpolator(nchunks=4):
function memoize (line 253) | def memoize(maxsize=10):
function deprecate_args (line 288) | def deprecate_args(old_new_args, deprecation_release):
FILE: pysteps/downscaling/interface.py
function get_method (line 20) | def get_method(name):
FILE: pysteps/downscaling/rainfarm.py
function _gaussianize (line 28) | def _gaussianize(precip):
function _compute_freq_array (line 44) | def _compute_freq_array(array, ds_factor=1):
function _log_slope (line 54) | def _log_slope(log_k, log_power_spectrum):
function _estimate_alpha (line 72) | def _estimate_alpha(array, k):
function _compute_noise_field (line 84) | def _compute_noise_field(freq_array_highres, alpha):
function _apply_spectral_fusion (line 100) | def _apply_spectral_fusion(
function _compute_kernel_radius (line 165) | def _compute_kernel_radius(ds_factor):
function _make_tophat_kernel (line 169) | def _make_tophat_kernel(ds_factor):
function _make_gaussian_kernel (line 177) | def _make_gaussian_kernel(ds_factor):
function _balanced_spatial_average (line 192) | def _balanced_spatial_average(array, kernel):
function downscale (line 212) | def downscale(
FILE: pysteps/exceptions.py
class MissingOptionalDependency (line 6) | class MissingOptionalDependency(Exception):
class DirectoryNotEmpty (line 12) | class DirectoryNotEmpty(Exception):
class DataModelError (line 18) | class DataModelError(Exception):
FILE: pysteps/extrapolation/interface.py
function eulerian_persistence (line 41) | def eulerian_persistence(precip, velocity, timesteps, outval=np.nan, **k...
function _do_nothing (line 96) | def _do_nothing(precip, velocity, timesteps, outval=np.nan, **kwargs):
function _return_none (line 102) | def _return_none(**kwargs):
function get_method (line 114) | def get_method(name):
FILE: pysteps/extrapolation/semilagrangian.py
function extrapolate (line 21) | def extrapolate(
FILE: pysteps/feature/blob.py
function detection (line 27) | def detection(
FILE: pysteps/feature/interface.py
function get_method (line 42) | def get_method(name):
FILE: pysteps/feature/shitomasi.py
function detection (line 26) | def detection(
FILE: pysteps/feature/tstorm.py
function detection (line 51) | def detection(
function breakup (line 202) | def breakup(ref, minval, maxima):
function longdistance (line 217) | def longdistance(loc_max, mindis):
function get_profile (line 241) | def get_profile(areas, binary, ref, loc_max, time, minref, output_splits...
FILE: pysteps/io/archive.py
function find_by_date (line 19) | def find_by_date(
function _find_matching_filename (line 93) | def _find_matching_filename(
function _generate_path (line 124) | def _generate_path(date, root_path, path_format):
FILE: pysteps/io/exporters.py
function initialize_forecast_exporter_geotiff (line 124) | def initialize_forecast_exporter_geotiff(
function initialize_forecast_exporter_kineros (line 240) | def initialize_forecast_exporter_kineros(
function initialize_forecast_exporter_netcdf (line 369) | def initialize_forecast_exporter_netcdf(
function export_forecast_dataset (line 660) | def export_forecast_dataset(field, exporter):
function close_forecast_files (line 747) | def close_forecast_files(exporter):
function _export_geotiff (line 771) | def _export_geotiff(F, exporter):
function _export_kineros (line 826) | def _export_kineros(field, exporter):
function _export_netcdf (line 860) | def _export_netcdf(field, exporter):
function _convert_proj4_to_grid_mapping (line 890) | def _convert_proj4_to_grid_mapping(proj4str):
function _create_geotiff_file (line 951) | def _create_geotiff_file(outfn, driver, shape, metadata, num_bands):
function _get_geotiff_filename (line 972) | def _get_geotiff_filename(prefix, startdate, n_timesteps, timestep, time...
FILE: pysteps/io/importers.py
function _check_coords_range (line 158) | def _check_coords_range(selected_range, coordinate, full_range):
function _get_grib_projection (line 183) | def _get_grib_projection(grib_msg):
function _get_threshold_value (line 221) | def _get_threshold_value(precip):
function import_mrms_grib (line 246) | def import_mrms_grib(filename, extent=None, window_size=4, **kwargs):
function import_bom_rf3 (line 442) | def import_bom_rf3(filename, **kwargs):
function _import_bom_rf3_data (line 476) | def _import_bom_rf3_data(filename):
function _import_bom_rf3_geodata (line 488) | def _import_bom_rf3_geodata(ds_rainfall):
function import_fmi_geotiff (line 571) | def import_fmi_geotiff(filename, **kwargs):
function import_fmi_pgm (line 639) | def import_fmi_pgm(filename, gzipped=False, **kwargs):
function _import_fmi_pgm_geodata (line 697) | def _import_fmi_pgm_geodata(metadata):
function _import_fmi_pgm_metadata (line 736) | def _import_fmi_pgm_metadata(filename, gzipped=False):
function import_knmi_hdf5 (line 766) | def import_knmi_hdf5(
function import_mch_gif (line 921) | def import_mch_gif(filename, product, unit, accutime, **kwargs):
function import_mch_hdf5 (line 1052) | def import_mch_hdf5(filename, qty="RATE", **kwargs):
function _read_mch_hdf5_what_group (line 1186) | def _read_mch_hdf5_what_group(whatgrp):
function import_mch_metranet (line 1197) | def import_mch_metranet(filename, product, unit, accutime):
function _import_mch_geodata (line 1262) | def _import_mch_geodata():
function import_odim_hdf5 (line 1298) | def import_odim_hdf5(filename, qty="RATE", **kwargs):
function import_opera_hdf5 (line 1521) | def import_opera_hdf5(filename, qty="RATE", **kwargs):
function _read_opera_hdf5_what_group (line 1531) | def _read_opera_hdf5_what_group(whatgrp):
function import_saf_crri (line 1542) | def import_saf_crri(filename, extent=None, **kwargs):
function _import_saf_crri_data (line 1618) | def _import_saf_crri_data(filename, idx_x=None, idx_y=None):
function _import_saf_crri_geodata (line 1636) | def _import_saf_crri_geodata(filename):
function import_dwd_hdf5 (line 1677) | def import_dwd_hdf5(filename, qty="RATE", **kwargs):
function _read_hdf5_cont (line 1894) | def _read_hdf5_cont(f, d):
function _get_whatgrp (line 1935) | def _get_whatgrp(d, g):
function import_dwd_radolan (line 1971) | def import_dwd_radolan(filename, product_name):
function _identify_info_bits (line 2071) | def _identify_info_bits(data):
function _import_dwd_geodata (line 2118) | def _import_dwd_geodata(product_name, dims):
FILE: pysteps/io/interface.py
function discover_importers (line 45) | def discover_importers():
function importers_info (line 82) | def importers_info():
function get_method (line 132) | def get_method(name, method_type):
FILE: pysteps/io/nowcast_importers.py
function import_netcdf_pysteps (line 85) | def import_netcdf_pysteps(filename, onerror="warn", **kwargs):
function _convert_grid_mapping_to_proj4 (line 223) | def _convert_grid_mapping_to_proj4(grid_mapping):
FILE: pysteps/io/readers.py
function read_timeseries (line 17) | def read_timeseries(inputfns, importer, **kwargs):
FILE: pysteps/motion/constant.py
function constant (line 20) | def constant(R, **kwargs):
FILE: pysteps/motion/darts.py
function DARTS (line 23) | def DARTS(input_images, **kwargs):
function _leastsq (line 223) | def _leastsq(A, B, y):
function _fill (line 240) | def _fill(X, h, w, k_x, k_y):
FILE: pysteps/motion/farneback.py
function farneback (line 40) | def farneback(
FILE: pysteps/motion/interface.py
function get_method (line 49) | def get_method(name):
FILE: pysteps/motion/lucaskanade.py
function dense_lucaskanade (line 39) | def dense_lucaskanade(
FILE: pysteps/motion/proesmans.py
function proesmans (line 22) | def proesmans(
FILE: pysteps/motion/vet.py
function round_int (line 46) | def round_int(scalar):
function ceil_int (line 53) | def ceil_int(scalar):
function get_padding (line 60) | def get_padding(dimension_size, sectors):
function morph (line 93) | def morph(image, displacement, gradient=False):
function vet_cost_function_gradient (line 156) | def vet_cost_function_gradient(*args, **kwargs):
function vet_cost_function (line 165) | def vet_cost_function(
function vet (line 303) | def vet(
FILE: pysteps/noise/fftgenerators.py
function initialize_param_2d_fft_filter (line 53) | def initialize_param_2d_fft_filter(field, **kwargs):
function initialize_nonparam_2d_fft_filter (line 221) | def initialize_nonparam_2d_fft_filter(field, **kwargs):
function generate_noise_2d_fft_filter (line 330) | def generate_noise_2d_fft_filter(
function initialize_nonparam_2d_ssft_filter (line 442) | def initialize_nonparam_2d_ssft_filter(field, **kwargs):
function initialize_nonparam_2d_nested_filter (line 577) | def initialize_nonparam_2d_nested_filter(field, gridres=1.0, **kwargs):
function generate_noise_2d_ssft_filter (line 735) | def generate_noise_2d_ssft_filter(F, randstate=None, seed=None, **kwargs):
function _split_field (line 855) | def _split_field(idxi, idxj, Segments):
function _get_mask (line 882) | def _get_mask(Size, idxi, idxj, win_fun):
FILE: pysteps/noise/interface.py
function get_method (line 48) | def get_method(name):
FILE: pysteps/noise/motion.py
function get_default_params_bps_par (line 43) | def get_default_params_bps_par():
function get_default_params_bps_perp (line 49) | def get_default_params_bps_perp():
function initialize_bps (line 55) | def initialize_bps(
function generate_bps (line 146) | def generate_bps(perturbator, t):
FILE: pysteps/noise/utils.py
function compute_noise_stddev_adjs (line 24) | def compute_noise_stddev_adjs(
FILE: pysteps/nowcasts/anvil.py
function forecast (line 37) | def forecast(
function _check_inputs (line 331) | def _check_inputs(vil, rainrate, velocity, timesteps, ar_order):
function _estimate_ar1_params (line 358) | def _estimate_ar1_params(gamma):
function _estimate_ar2_params (line 369) | def _estimate_ar2_params(gamma):
function _moving_window_corrcoef (line 389) | def _moving_window_corrcoef(x, y, window_radius):
function _r_vil_regression (line 431) | def _r_vil_regression(vil, r, window_radius):
function _update (line 480) | def _update(state, params):
FILE: pysteps/nowcasts/extrapolation.py
function forecast (line 19) | def forecast(
function _check_inputs (line 104) | def _check_inputs(precip, velocity, timesteps):
FILE: pysteps/nowcasts/interface.py
function get_method (line 57) | def get_method(name):
FILE: pysteps/nowcasts/lagrangian_probability.py
function forecast (line 20) | def forecast(
function _get_kernel (line 110) | def _get_kernel(size):
FILE: pysteps/nowcasts/linda.py
function forecast (line 62) | def forecast(
function _check_inputs (line 386) | def _check_inputs(precip, velocity, timesteps, ari_order):
function _composite_convolution (line 403) | def _composite_convolution(field, kernels, weights):
function _compute_ellipse_bbox (line 417) | def _compute_ellipse_bbox(phi, sigma1, sigma2, cutoff):
function _compute_inverse_acf_mapping (line 436) | def _compute_inverse_acf_mapping(target_dist, target_dist_params, n_inte...
function _compute_kernel_anisotropic (line 465) | def _compute_kernel_anisotropic(params, cutoff=6.0):
function _compute_kernel_isotropic (line 494) | def _compute_kernel_isotropic(sigma, cutoff=6.0):
function _compute_parametric_acf (line 518) | def _compute_parametric_acf(params, m, n):
function _compute_sample_acf (line 553) | def _compute_sample_acf(field):
function _compute_window_weights (line 562) | def _compute_window_weights(coords, grid_height, grid_width, window_radi...
function _estimate_ar1_params (line 594) | def _estimate_ar1_params(
function _estimate_ar2_params (line 623) | def _estimate_ar2_params(
function _estimate_convol_params (line 673) | def _estimate_convol_params(
function _estimate_perturbation_params (line 749) | def _estimate_perturbation_params(
function _fit_acf (line 865) | def _fit_acf(acf):
function _fit_dist (line 889) | def _fit_dist(err, dist, wf, mask):
function _generate_perturbations (line 900) | def _generate_perturbations(pert_gen, num_workers, seed):
function _get_acf_params (line 939) | def _get_acf_params(p):
function _get_anisotropic_kernel_params (line 944) | def _get_anisotropic_kernel_params(p):
function _iterate_ar_model (line 951) | def _iterate_ar_model(input_fields, psi):
function _linda_forecast (line 961) | def _linda_forecast(
function _linda_deterministic_init (line 1062) | def _linda_deterministic_init(
function _linda_perturbation_init (line 1298) | def _linda_perturbation_init(
function _masked_convolution (line 1396) | def _masked_convolution(field, kernel):
function _update (line 1410) | def _update(state, params):
function _weighted_std (line 1455) | def _weighted_std(f, w):
function _window_tukey (line 1469) | def _window_tukey(m, n, ci, cj, ri, rj, alpha=0.5):
function _window_uniform (line 1500) | def _window_uniform(m, n, ci, cj, ri, rj):
FILE: pysteps/nowcasts/sprog.py
function forecast (line 32) | def forecast(
function _check_inputs (line 380) | def _check_inputs(precip, velocity, timesteps, ar_order):
function _update (line 396) | def _update(state, params):
FILE: pysteps/nowcasts/sseps.py
function forecast (line 40) | def forecast(
function _check_inputs (line 937) | def _check_inputs(precip, velocity, timesteps, ar_order):
function _recompose_cascade (line 954) | def _recompose_cascade(precip, mu, sigma):
function _build_2D_tapering_function (line 961) | def _build_2D_tapering_function(win_size, win_type="flat-hanning"):
function _get_mask (line 1024) | def _get_mask(Size, idxi, idxj, win_type="flat-hanning"):
FILE: pysteps/nowcasts/steps.py
class StepsNowcasterConfig (line 42) | class StepsNowcasterConfig:
class StepsNowcasterParams (line 239) | class StepsNowcasterParams:
class StepsNowcasterState (line 266) | class StepsNowcasterState:
class StepsNowcaster (line 287) | class StepsNowcaster:
method __init__ (line 288) | def __init__(
method compute_forecast (line 308) | def compute_forecast(self):
method __nowcast_main (line 416) | def __nowcast_main(self):
method __check_inputs (line 451) | def __check_inputs(self):
method __print_forecast_info (line 545) | def __print_forecast_info(self):
method __initialize_nowcast_components (line 617) | def __initialize_nowcast_components(self):
method __perform_extrapolation (line 663) | def __perform_extrapolation(self):
method __apply_noise_and_ar_model (line 716) | def __apply_noise_and_ar_model(self):
method __initialize_velocity_perturbations (line 896) | def __initialize_velocity_perturbations(self):
method __initialize_precipitation_mask (line 930) | def __initialize_precipitation_mask(self):
method __initialize_fft_objects (line 994) | def __initialize_fft_objects(self):
method __return_state_dict (line 1006) | def __return_state_dict(self):
method __return_params_dict (line 1020) | def __return_params_dict(self, precip):
method __update_state (line 1049) | def __update_state(self, state, params):
method __update_deterministic_ar_model (line 1081) | def __update_deterministic_ar_model(self, state, params):
method __apply_ar_model_to_cascades (line 1108) | def __apply_ar_model_to_cascades(self, j, state, params):
method __generate_and_decompose_noise (line 1140) | def __generate_and_decompose_noise(self, j, state, params):
method __recompose_and_apply_mask (line 1164) | def __recompose_and_apply_mask(self, j, state, params):
method __apply_precipitation_mask (line 1213) | def __apply_precipitation_mask(self, precip_forecast, j, state, params):
method __measure_time (line 1234) | def __measure_time(self, label, start_time):
method reset_states_and_params (line 1248) | def reset_states_and_params(self):
function forecast (line 1265) | def forecast(
FILE: pysteps/nowcasts/utils.py
function binned_timesteps (line 34) | def binned_timesteps(timesteps):
function compute_dilated_mask (line 69) | def compute_dilated_mask(input_mask, kr, r):
function compute_percentile_mask (line 102) | def compute_percentile_mask(precip, pct):
function zero_precipitation_forecast (line 141) | def zero_precipitation_forecast(
function create_timestep_range (line 247) | def create_timestep_range(timesteps):
function nowcast_main_loop (line 265) | def nowcast_main_loop(
function print_ar_params (line 536) | def print_ar_params(phi):
function print_corrcoefs (line 574) | def print_corrcoefs(gamma):
function stack_cascades (line 612) | def stack_cascades(precip_decomp, n_levels, convert_to_full_arrays=False):
FILE: pysteps/postprocessing/ensemblestats.py
function mean (line 20) | def mean(X, ignore_nan=False, X_thr=None):
function excprob (line 61) | def excprob(X, X_thr, ignore_nan=False):
function banddepth (line 118) | def banddepth(X, thr=None, norm=False):
FILE: pysteps/postprocessing/interface.py
function add_postprocessor (line 36) | def add_postprocessor(
function discover_postprocessors (line 79) | def discover_postprocessors():
function print_postprocessors_info (line 103) | def print_postprocessors_info(module_name, interface_methods, module_met...
function postprocessors_info (line 146) | def postprocessors_info():
function get_method (line 187) | def get_method(name, method_type):
FILE: pysteps/postprocessing/probmatching.py
function compute_empirical_cdf (line 24) | def compute_empirical_cdf(bin_edges, hist):
function nonparam_match_empirical_cdf (line 55) | def nonparam_match_empirical_cdf(initial_array, target_array, ignore_ind...
function pmm_init (line 144) | def pmm_init(bin_edges_1, cdf_1, bin_edges_2, cdf_2):
function pmm_compute (line 171) | def pmm_compute(pmm, x):
function shift_scale (line 193) | def shift_scale(R, f, rain_fraction_trg, second_moment_trg, **kwargs):
function resample_distributions (line 277) | def resample_distributions(
function _invfunc (line 340) | def _invfunc(y, fx, fy):
FILE: pysteps/tests/helpers.py
function get_precipitation_fields (line 28) | def get_precipitation_fields(
function smart_assert (line 214) | def smart_assert(actual_value, expected, tolerance=None):
function get_invalid_mask (line 232) | def get_invalid_mask(input_array, fillna=np.nan):
FILE: pysteps/tests/test_archive.py
function test_generate_path (line 19) | def test_generate_path(timestamp, path_fmt, expected_path):
FILE: pysteps/tests/test_blending_clim.py
function generate_fixed_skill (line 22) | def generate_fixed_skill(n_cascade_levels, n_models=1):
function test_save_skill (line 101) | def test_save_skill(startdatestr, enddatestr, n_models, expected_skill_t...
FILE: pysteps/tests/test_blending_linear_blending.py
function test_linear_blending (line 162) | def test_linear_blending(
function test_salient_weight (line 293) | def test_salient_weight(
FILE: pysteps/tests/test_blending_pca_ens_kalman_filter.py
function test_pca_enkf_combination (line 75) | def test_pca_enkf_combination(
FILE: pysteps/tests/test_blending_skill_scores.py
function test_blending_skill_scores (line 229) | def test_blending_skill_scores(
FILE: pysteps/tests/test_blending_steps.py
function test_steps_blending (line 103) | def test_steps_blending(
FILE: pysteps/tests/test_blending_utils.py
function test_blending_utils (line 166) | def test_blending_utils(
function test_blending_smoothing_utils (line 429) | def test_blending_smoothing_utils(
FILE: pysteps/tests/test_cascade.py
function test_decompose_recompose (line 17) | def test_decompose_recompose():
function test_filter_uniform (line 60) | def test_filter_uniform(variable, expected, tolerance):
function test_filter_uniform_weights_1d (line 65) | def test_filter_uniform_weights_1d():
function test_filter_uniform_weights_2d (line 70) | def test_filter_uniform_weights_2d():
FILE: pysteps/tests/test_datasets.py
function test_load_dataset (line 27) | def test_load_dataset(case_name):
function _test_download_data (line 42) | def _test_download_data():
function _default_path (line 60) | def _default_path():
function test_params_file_creation_path (line 81) | def test_params_file_creation_path(config_dir, file_name, expected_path):
FILE: pysteps/tests/test_decorators.py
function test_memoize (line 7) | def test_memoize():
FILE: pysteps/tests/test_downscaling_rainfarm.py
function data (line 11) | def data():
function test_rainfarm_shape (line 37) | def test_rainfarm_shape(
function test_rainfarm_aggregate (line 76) | def test_rainfarm_aggregate(
function test_rainfarm_alpha (line 110) | def test_rainfarm_alpha(
FILE: pysteps/tests/test_ensscores.py
function test_rankhist_size (line 21) | def test_rankhist_size(X_f, X_o, X_min, normalize, expected):
function test_ensemble_skill (line 43) | def test_ensemble_skill(X_f, X_o, metric, kwargs, expected):
function test_ensemble_spread (line 59) | def test_ensemble_spread(X_f, metric, kwargs, expected):
FILE: pysteps/tests/test_exporters.py
function test_get_geotiff_filename (line 41) | def test_get_geotiff_filename():
function test_io_export_netcdf_one_member_one_time_step (line 60) | def test_io_export_netcdf_one_member_one_time_step(
function test_convert_proj4_to_grid_mapping (line 190) | def test_convert_proj4_to_grid_mapping(proj4str, expected_value):
FILE: pysteps/tests/test_extrapolation_semilagrangian.py
function test_semilagrangian (line 9) | def test_semilagrangian():
function test_wrong_input_dimensions (line 27) | def test_wrong_input_dimensions():
function test_ascending_time_step (line 47) | def test_ascending_time_step():
function test_semilagrangian_timesteps (line 57) | def test_semilagrangian_timesteps():
FILE: pysteps/tests/test_feature.py
function test_feature (line 11) | def test_feature(method, max_num_features):
FILE: pysteps/tests/test_feature_tstorm.py
function test_feature_tstorm_detection (line 33) | def test_feature_tstorm_detection(
FILE: pysteps/tests/test_importer_decorator.py
function test_postprocess_import_decorator (line 20) | def test_postprocess_import_decorator(source, default_dtype):
FILE: pysteps/tests/test_interfaces.py
function _generic_interface_test (line 9) | def _generic_interface_test(method_getter, valid_names_func_pair, invali...
function test_nowcasts_interface (line 22) | def test_nowcasts_interface():
function test_cascade_interface (line 41) | def test_cascade_interface():
function test_extrapolation_interface (line 58) | def test_extrapolation_interface():
function test_io_interface (line 94) | def test_io_interface():
function test_postprocessing_interface (line 155) | def test_postprocessing_interface():
function test_motion_interface (line 211) | def test_motion_interface():
function test_noise_interface (line 245) | def test_noise_interface():
function test_nowcasts_interface (line 280) | def test_nowcasts_interface():
function test_utils_interface (line 320) | def test_utils_interface():
function test_downscaling_interface (line 370) | def test_downscaling_interface():
function test_feature_interface (line 385) | def test_feature_interface():
function test_tracking_interface (line 404) | def test_tracking_interface():
FILE: pysteps/tests/test_io_archive.py
function test_find_by_date_mch (line 8) | def test_find_by_date_mch():
FILE: pysteps/tests/test_io_bom_rf3.py
function test_io_import_bom_rf3_metadata (line 36) | def test_io_import_bom_rf3_metadata(variable, expected, tolerance):
function test_io_import_bom_rf3_shape (line 47) | def test_io_import_bom_rf3_shape():
function test_io_import_bom_rf3_geodata (line 78) | def test_io_import_bom_rf3_geodata(variable, expected, tolerance):
FILE: pysteps/tests/test_io_dwd_hdf5.py
function test_io_import_dwd_hdf5_ry_shape (line 23) | def test_io_import_dwd_hdf5_ry_shape():
function test_io_import_dwd_hdf5_ry_metadata (line 62) | def test_io_import_dwd_hdf5_ry_metadata(variable, expected, tolerance):
FILE: pysteps/tests/test_io_fmi_geotiff.py
function test_io_import_fmi_geotiff_shape (line 20) | def test_io_import_fmi_geotiff_shape():
function test_io_import_fmi_pgm_geodata (line 44) | def test_io_import_fmi_pgm_geodata(variable, expected, tolerance):
FILE: pysteps/tests/test_io_fmi_pgm.py
function test_io_import_fmi_pgm_shape (line 20) | def test_io_import_fmi_pgm_shape():
function test_io_import_mch_gif_dataset_attrs (line 62) | def test_io_import_mch_gif_dataset_attrs(variable, expected, tolerance):
function test_io_import_fmi_pgm_geodata (line 82) | def test_io_import_fmi_pgm_geodata(variable, expected, tolerance):
FILE: pysteps/tests/test_io_knmi_hdf5.py
function test_io_import_knmi_hdf5_shape (line 18) | def test_io_import_knmi_hdf5_shape():
function test_io_import_knmi_hdf5_metadata (line 52) | def test_io_import_knmi_hdf5_metadata(variable, expected, tolerance):
FILE: pysteps/tests/test_io_mch_gif.py
function test_io_import_mch_gif_shape (line 17) | def test_io_import_mch_gif_shape():
function test_io_import_mch_gif_dataset_attrs (line 53) | def test_io_import_mch_gif_dataset_attrs(variable, expected, tolerance):
function test_io_import_mch_geodata (line 73) | def test_io_import_mch_geodata(variable, expected, tolerance):
FILE: pysteps/tests/test_io_mrms_grib.py
function test_io_import_mrms_grib (line 14) | def test_io_import_mrms_grib():
FILE: pysteps/tests/test_io_nowcast_importers.py
function test_import_netcdf (line 20) | def test_import_netcdf(precip, metadata, tmp_path):
FILE: pysteps/tests/test_io_opera_hdf5.py
function test_io_import_opera_hdf5_odyssey_shape (line 42) | def test_io_import_opera_hdf5_odyssey_shape():
function test_io_import_opera_hdf5_cirrus_shape (line 47) | def test_io_import_opera_hdf5_cirrus_shape():
function test_io_import_opera_hdf5_nimbus_rain_rate_shape (line 52) | def test_io_import_opera_hdf5_nimbus_rain_rate_shape():
function test_io_import_opera_hdf5_nimbus_rain_accum_shape (line 57) | def test_io_import_opera_hdf5_nimbus_rain_accum_shape():
function test_io_import_opera_hdf5_odyssey_dataset_attrs (line 95) | def test_io_import_opera_hdf5_odyssey_dataset_attrs(variable, expected, ...
function test_io_import_opera_hdf5_cirrus_dataset_attrs (line 125) | def test_io_import_opera_hdf5_cirrus_dataset_attrs(variable, expected, t...
function test_io_import_opera_hdf5_nimbus_rain_rate_dataset_attrs (line 155) | def test_io_import_opera_hdf5_nimbus_rain_rate_dataset_attrs(
function test_io_import_opera_hdf5_nimbus_rain_accum_dataset_attrs (line 187) | def test_io_import_opera_hdf5_nimbus_rain_accum_dataset_attrs(
FILE: pysteps/tests/test_io_readers.py
function test_read_timeseries_mch (line 9) | def test_read_timeseries_mch():
FILE: pysteps/tests/test_io_saf_crri.py
function test_io_import_saf_crri_geodata (line 31) | def test_io_import_saf_crri_geodata(variable, expected, tolerance):
function test_io_import_saf_crri_attrs (line 61) | def test_io_import_saf_crri_attrs(variable, expected, tolerance):
function test_io_import_saf_crri_extent (line 80) | def test_io_import_saf_crri_extent(extent, expected_extent, expected_sha...
FILE: pysteps/tests/test_motion.py
function not_raises (line 34) | def not_raises(_exception):
function _create_motion_field (line 44) | def _create_motion_field(input_precip, motion_type):
function _create_observations (line 83) | def _create_observations(input_precip, motion_type, num_times=9):
function test_optflow_method_convergence (line 173) | def test_optflow_method_convergence(
function test_no_precipitation (line 266) | def test_no_precipitation(optflow_method_name, num_times):
function test_input_shape_checks (line 307) | def test_input_shape_checks(
function test_vet_padding (line 331) | def test_vet_padding():
function test_vet_cost_function (line 365) | def test_vet_cost_function():
function test_motion_masked_array (line 407) | def test_motion_masked_array(method, kwargs):
FILE: pysteps/tests/test_motion_farneback.py
function test_farneback_params (line 41) | def test_farneback_params(
function test_farneback_invalid_shape (line 85) | def test_farneback_invalid_shape():
function test_farneback_nan_input (line 93) | def test_farneback_nan_input():
function test_farneback_cv2_missing (line 103) | def test_farneback_cv2_missing(monkeypatch):
function test_farneback_sigma_zero (line 112) | def test_farneback_sigma_zero():
function test_farneback_small_window (line 121) | def test_farneback_small_window():
function test_farneback_verbose (line 129) | def test_farneback_verbose(capsys):
FILE: pysteps/tests/test_motion_lk.py
function test_lk (line 43) | def test_lk(
FILE: pysteps/tests/test_noise_fftgenerators.py
function test_noise_param_2d_fft_filter (line 16) | def test_noise_param_2d_fft_filter():
function test_noise_nonparam_2d_fft_filter (line 28) | def test_noise_nonparam_2d_fft_filter():
function test_noise_nonparam_2d_ssft_filter (line 40) | def test_noise_nonparam_2d_ssft_filter():
function test_noise_nonparam_2d_nested_filter (line 52) | def test_noise_nonparam_2d_nested_filter():
FILE: pysteps/tests/test_noise_motion.py
function test_noise_motion_get_default_params_bps_par (line 13) | def test_noise_motion_get_default_params_bps_par():
function test_noise_motion_get_default_params_bps_perp (line 20) | def test_noise_motion_get_default_params_bps_perp():
function test_initialize_bps (line 40) | def test_initialize_bps(variable, expected):
function test_generate_bps (line 51) | def test_generate_bps():
FILE: pysteps/tests/test_nowcasts_anvil.py
function test_default_anvil_norain (line 23) | def test_default_anvil_norain():
function test_anvil_rainrate (line 46) | def test_anvil_rainrate(
FILE: pysteps/tests/test_nowcasts_lagrangian_probability.py
function test_numerical_example (line 10) | def test_numerical_example():
function test_numerical_example_with_float_slope_and_float_list_timesteps (line 35) | def test_numerical_example_with_float_slope_and_float_list_timesteps():
function test_real_case (line 54) | def test_real_case():
function test_wrong_inputs (line 88) | def test_wrong_inputs():
FILE: pysteps/tests/test_nowcasts_linda.py
function test_default_linda_norain (line 30) | def test_default_linda_norain():
function test_linda (line 57) | def test_linda(
function test_linda_wrong_inputs (line 133) | def test_linda_wrong_inputs():
function test_linda_callback (line 169) | def test_linda_callback(tmp_path):
FILE: pysteps/tests/test_nowcasts_sprog.py
function test_default_sprog_norain (line 28) | def test_default_sprog_norain():
function test_sprog (line 52) | def test_sprog(
FILE: pysteps/tests/test_nowcasts_sseps.py
function test_default_sseps_norain (line 26) | def test_default_sseps_norain():
function test_sseps (line 58) | def test_sseps(
FILE: pysteps/tests/test_nowcasts_steps.py
function test_default_steps_norain (line 33) | def test_default_steps_norain():
function test_steps_skill (line 61) | def test_steps_skill(
function test_steps_callback (line 119) | def test_steps_callback(tmp_path):
FILE: pysteps/tests/test_nowcasts_utils.py
function test_nowcast_main_loop (line 25) | def test_nowcast_main_loop(
FILE: pysteps/tests/test_paramsrc.py
function test_read_paramsrc (line 37) | def test_read_paramsrc():
FILE: pysteps/tests/test_plt_animate.py
function test_animate (line 41) | def test_animate(anim_args, anim_kwargs):
function test_animate_valueerrors (line 54) | def test_animate_valueerrors(anim_args, anim_kwargs):
function test_animate_typeerrors (line 70) | def test_animate_typeerrors(anim_args, anim_kwargs):
function test_animate_save (line 75) | def test_animate_save(tmp_path):
FILE: pysteps/tests/test_plt_cartopy.py
function test_visualization_plot_precip_field (line 27) | def test_visualization_plot_precip_field(source, map_kwargs, pass_geodata):
FILE: pysteps/tests/test_plt_motionfields.py
function test_visualization_motionfields_quiver (line 30) | def test_visualization_motionfields_quiver(
function test_visualization_motionfields_streamplot (line 75) | def test_visualization_motionfields_streamplot(
FILE: pysteps/tests/test_plt_precipfields.py
function test_visualization_plot_precip_field (line 40) | def test_visualization_plot_precip_field(
FILE: pysteps/tests/test_plugins_support.py
function _check_installed_importer_plugin (line 23) | def _check_installed_importer_plugin(import_func_name):
function _check_installed_diagnostic_plugin (line 35) | def _check_installed_diagnostic_plugin(diagnostic_func_name):
function _create_and_install_plugin (line 45) | def _create_and_install_plugin(project_name, plugin_type):
function _uninstall_plugin (line 80) | def _uninstall_plugin(project_name):
function test_importers_plugins (line 87) | def test_importers_plugins():
function test_diagnostic_plugins (line 92) | def test_diagnostic_plugins():
FILE: pysteps/tests/test_postprocessing_ensemblestats.py
function test_ensemblestats_mean (line 34) | def test_ensemblestats_mean(X, ignore_nan, X_thr, expected):
function test_exceptions_mean (line 45) | def test_exceptions_mean(X):
function test_ensemblestats_excprob (line 62) | def test_ensemblestats_excprob(X, X_thr, ignore_nan, expected):
function test_exceptions_excprob (line 72) | def test_exceptions_excprob(X):
function test_ensemblestats_banddepth (line 89) | def test_ensemblestats_banddepth(X, thr, norm, expected):
FILE: pysteps/tests/test_postprocessing_probmatching.py
class TestResampleDistributions (line 10) | class TestResampleDistributions:
method setup (line 13) | def setup(self):
method test_valid_inputs (line 17) | def test_valid_inputs(self):
method test_probability_zero (line 28) | def test_probability_zero(self):
method test_probability_one (line 37) | def test_probability_one(self):
method test_nan_in_arr1_prob_1 (line 46) | def test_nan_in_arr1_prob_1(self):
method test_nan_in_arr1_prob_0 (line 56) | def test_nan_in_arr1_prob_0(self):
method test_nan_in_arr2_prob_1 (line 66) | def test_nan_in_arr2_prob_1(self):
method test_nan_in_arr2_prob_0 (line 76) | def test_nan_in_arr2_prob_0(self):
method test_nan_in_both_prob_1 (line 86) | def test_nan_in_both_prob_1(self):
method test_nan_in_both_prob_0 (line 96) | def test_nan_in_both_prob_0(self):
class TestNonparamMatchEmpiricalCDF (line 107) | class TestNonparamMatchEmpiricalCDF:
method setup (line 109) | def setup(self):
method test_ignore_indices_with_nans_both (line 113) | def test_ignore_indices_with_nans_both(self):
method test_zeroes_initial (line 122) | def test_zeroes_initial(self):
method test_nans_initial (line 129) | def test_nans_initial(self):
method test_all_nans_initial (line 140) | def test_all_nans_initial(self):
method test_ignore_indices_nans_initial (line 146) | def test_ignore_indices_nans_initial(self):
method test_ignore_indices_nans_target (line 159) | def test_ignore_indices_nans_target(self):
method test_more_zeroes_in_initial (line 171) | def test_more_zeroes_in_initial(self):
method test_more_zeroes_in_initial_unsrt (line 180) | def test_more_zeroes_in_initial_unsrt(self):
method test_more_zeroes_in_target (line 189) | def test_more_zeroes_in_target(self):
method test_2dim_array (line 198) | def test_2dim_array(self):
FILE: pysteps/tests/test_timeseries_autoregression.py
function test_estimate_ar_params_ols (line 14) | def test_estimate_ar_params_ols():
function test_estimate_ar_params_yw (line 38) | def test_estimate_ar_params_yw():
function test_estimate_ar_params_yw_localized (line 49) | def test_estimate_ar_params_yw_localized():
function test_estimate_ar_params_ols_localized (line 62) | def test_estimate_ar_params_ols_localized():
function test_estimate_var_params_ols (line 86) | def test_estimate_var_params_ols():
function test_estimate_var_params_ols_localized (line 113) | def test_estimate_var_params_ols_localized():
function test_estimate_var_params_yw (line 140) | def test_estimate_var_params_yw():
function test_estimate_var_params_yw_localized (line 151) | def test_estimate_var_params_yw_localized():
function test_iterate_ar (line 165) | def test_iterate_ar():
function test_iterate_ar_localized (line 173) | def test_iterate_ar_localized():
function test_iterate_var (line 182) | def test_iterate_var():
function test_iterate_var_localized (line 191) | def test_iterate_var_localized():
function _create_data_multivariate (line 200) | def _create_data_multivariate():
function _create_data_univariate (line 224) | def _create_data_univariate():
FILE: pysteps/tests/test_tracking_tdating.py
function test_tracking_tdating_dating_multistep (line 27) | def test_tracking_tdating_dating_multistep(source, len_timesteps, output...
function test_tracking_tdating_dating (line 74) | def test_tracking_tdating_dating(source, dry_input, output_splits_merges):
FILE: pysteps/tests/test_utils_arrays.py
function test_compute_centred_coord_array (line 19) | def test_compute_centred_coord_array(M, N, expected):
FILE: pysteps/tests/test_utils_cleansing.py
function test_decluster_empty (line 9) | def test_decluster_empty():
function test_decluster_single (line 23) | def test_decluster_single():
function test_decluster (line 42) | def test_decluster():
function test_decluster_value_error_is_raise_when_input_has_nan (line 65) | def test_decluster_value_error_is_raise_when_input_has_nan():
function test_detect_outlier_constant (line 74) | def test_detect_outlier_constant():
function test_detect_outlier_univariate_global (line 97) | def test_detect_outlier_univariate_global():
function test_detect_outlier_multivariate_global (line 117) | def test_detect_outlier_multivariate_global():
function test_detect_outlier_univariate_local (line 140) | def test_detect_outlier_univariate_local():
function test_detect_outlier_multivariate_local (line 162) | def test_detect_outlier_multivariate_local():
function test_detect_outlier_wrong_input_dims_raise_error (line 188) | def test_detect_outlier_wrong_input_dims_raise_error():
FILE: pysteps/tests/test_utils_conversion.py
function test_to_rainrate (line 114) | def test_to_rainrate(R, metadata, expected):
function test_to_raindepth (line 224) | def test_to_raindepth(R, metadata, expected):
function test_to_reflectivity (line 334) | def test_to_reflectivity(R, metadata, expected):
FILE: pysteps/tests/test_utils_dimension.py
function test_aggregate_fields (line 63) | def test_aggregate_fields(data, window_size, axis, method, expected):
function test_aggregate_fields_errors (line 90) | def test_aggregate_fields_errors():
function test_aggregate_fields_time (line 131) | def test_aggregate_fields_time(R, metadata, time_window_min, ignore_nan,...
function test_aggregate_fields_space (line 166) | def test_aggregate_fields_space(R, metadata, space_window, ignore_nan, e...
function test_clip_domain (line 227) | def test_clip_domain(R, metadata, extent, expected):
function test_square_domain (line 289) | def test_square_domain(R, metadata, method, inverse, expected):
FILE: pysteps/tests/test_utils_interpolate.py
function test_interp_univariate (line 14) | def test_interp_univariate(interp_method):
function test_interp_multivariate (line 29) | def test_interp_multivariate(interp_method):
function test_wrong_inputs (line 45) | def test_wrong_inputs(interp_method):
function test_one_sample_input (line 82) | def test_one_sample_input(interp_method):
function test_uniform_input (line 97) | def test_uniform_input(interp_method):
function test_idwinterp2d_k1 (line 115) | def test_idwinterp2d_k1():
function test_idwinterp2d_kNone (line 130) | def test_idwinterp2d_kNone():
FILE: pysteps/tests/test_utils_pca.py
function test_pca (line 18) | def test_pca(len_y, n_components):
FILE: pysteps/tests/test_utils_reprojection.py
function test_utils_reproject_grids (line 76) | def test_utils_reproject_grids(
FILE: pysteps/tests/test_utils_spectral.py
function test_rapsd (line 14) | def test_rapsd(field):
FILE: pysteps/tests/test_utils_transformation.py
function test_boxcox_transform (line 77) | def test_boxcox_transform(R, metadata, Lambda, threshold, zerovalue, inv...
function test_dB_transform (line 123) | def test_dB_transform(R, metadata, threshold, zerovalue, inverse, expect...
function test_NQ_transform (line 149) | def test_NQ_transform(R, metadata, inverse, expected):
function test_sqrt_transform (line 186) | def test_sqrt_transform(R, metadata, inverse, expected):
FILE: pysteps/tests/test_verification_detcatscores.py
function test_det_cat_fct (line 59) | def test_det_cat_fct(pred, obs, thr, scores, expected):
FILE: pysteps/tests/test_verification_detcontscores.py
function test_det_cont_fct (line 146) | def test_det_cont_fct(pred, obs, scores, axis, conditioning, expected):
FILE: pysteps/tests/test_verification_probscores.py
function test_CRPS (line 18) | def test_CRPS(X_f, X_o, expected):
function test_reldiag_sum (line 28) | def test_reldiag_sum(X_f, X_o, X_min, n_bins, min_count, expected):
function test_ROC_curve_area (line 43) | def test_ROC_curve_area(X_f, X_o, X_min, n_prob_thrs, compute_area, expe...
FILE: pysteps/tests/test_verification_salscores.py
class TestSAL (line 17) | class TestSAL:
method test_sal_zeros (line 21) | def test_sal_zeros(self, converter, thr_factor):
method test_sal_same_image (line 36) | def test_sal_same_image(self, converter, thr_factor):
method test_sal_translation (line 47) | def test_sal_translation(self, converter, thr_factor):
FILE: pysteps/tests/test_verification_spatialscores.py
function test_intensity_scale (line 17) | def test_intensity_scale(X_f, X_o, name, thrs, scales, wavelet, expected):
function test_intensity_scale_methods (line 36) | def test_intensity_scale_methods(R1, R2, name, thrs, scales, wavelet):
FILE: pysteps/timeseries/autoregression.py
function adjust_lag2_corrcoef1 (line 31) | def adjust_lag2_corrcoef1(gamma_1, gamma_2):
function adjust_lag2_corrcoef2 (line 55) | def adjust_lag2_corrcoef2(gamma_1, gamma_2):
function ar_acf (line 81) | def ar_acf(gamma, n=None):
function estimate_ar_params_ols (line 123) | def estimate_ar_params_ols(
function estimate_ar_params_ols_localized (line 235) | def estimate_ar_params_ols_localized(
function estimate_ar_params_yw (line 402) | def estimate_ar_params_yw(gamma, d=0, check_stationarity=True):
function estimate_ar_params_yw_localized (line 478) | def estimate_ar_params_yw_localized(gamma, d=0):
function estimate_var_params_ols (line 556) | def estimate_var_params_ols(
function estimate_var_params_ols_localized (line 674) | def estimate_var_params_ols_localized(
function estimate_var_params_yw (line 852) | def estimate_var_params_yw(gamma, d=0, check_stationarity=True):
function estimate_var_params_yw_localized (line 937) | def estimate_var_params_yw_localized(gamma, d=0):
function iterate_ar_model (line 1018) | def iterate_ar_model(x, phi, eps=None):
function iterate_var_model (line 1074) | def iterate_var_model(x, phi, eps=None):
function test_ar_stationarity (line 1136) | def test_ar_stationarity(phi):
function test_var_stationarity (line 1162) | def test_var_stationarity(phi):
function _compute_differenced_model_params (line 1197) | def _compute_differenced_model_params(phi, p, q, d):
FILE: pysteps/timeseries/correlation.py
function temporal_autocorrelation (line 21) | def temporal_autocorrelation(
function temporal_autocorrelation_multivariate (line 133) | def temporal_autocorrelation_multivariate(
function _moving_window_corrcoef (line 222) | def _moving_window_corrcoef(x, y, window_radius, window="gaussian", mask...
FILE: pysteps/tracking/interface.py
function get_method (line 23) | def get_method(name):
FILE: pysteps/tracking/lucaskanade.py
function track_features (line 35) | def track_features(
FILE: pysteps/tracking/tdating.py
function dating (line 52) | def dating(
function tracking (line 271) | def tracking(
function advect (line 346) | def advect(cells_id, labels, V1, output_splits_merges=False):
function match (line 400) | def match(cells_ad, labels, match_frac=0.4, split_frac=0.1, output_split...
function couple_track (line 448) | def couple_track(cell_list, max_ID, mintrack):
FILE: pysteps/utils/arrays.py
function compute_centred_coord_array (line 16) | def compute_centred_coord_array(M, N):
FILE: pysteps/utils/check_norain.py
function check_norain (line 6) | def check_norain(precip_arr, precip_thr=None, norain_thr=0.0, win_fun=No...
FILE: pysteps/utils/cleansing.py
function decluster (line 21) | def decluster(coord, input_array, scale, min_samples=1, verbose=False):
function detect_outliers (line 124) | def detect_outliers(input_array, thr, coord=None, k=None, verbose=False):
FILE: pysteps/utils/conversion.py
function to_rainrate (line 25) | def to_rainrate(R, metadata, zr_a=None, zr_b=None):
function to_raindepth (line 116) | def to_raindepth(R, metadata, zr_a=None, zr_b=None):
function to_reflectivity (line 207) | def to_reflectivity(R, metadata, zr_a=None, zr_b=None):
FILE: pysteps/utils/dimension.py
function aggregate_fields_time (line 25) | def aggregate_fields_time(R, metadata, time_window_min, ignore_nan=False):
function aggregate_fields_space (line 120) | def aggregate_fields_space(R, metadata, space_window, ignore_nan=False):
function aggregate_fields (line 219) | def aggregate_fields(data, window_size, axis=0, method="mean", trim=False):
function clip_domain (line 342) | def clip_domain(R, metadata, extent=None):
function square_domain (line 454) | def square_domain(R, metadata, method="pad", inverse=False):
FILE: pysteps/utils/fft.py
function get_numpy (line 20) | def get_numpy(shape, fftn_shape=None, **kwargs):
function get_scipy (line 39) | def get_scipy(shape, fftn_shape=None, **kwargs):
function get_pyfftw (line 61) | def get_pyfftw(shape, fftn_shape=None, n_threads=1, **kwargs):
FILE: pysteps/utils/images.py
function morph_opening (line 27) | def morph_opening(input_image, thr, n):
FILE: pysteps/utils/interface.py
function get_method (line 28) | def get_method(name, **kwargs):
function _get_fft_method (line 254) | def _get_fft_method(name, **kwargs):
FILE: pysteps/utils/interpolate.py
function idwinterp2d (line 27) | def idwinterp2d(
function rbfinterp2d (line 118) | def rbfinterp2d(xy_coord, values, xgrid, ygrid, **kwargs):
function _cKDTree_cached (line 174) | def _cKDTree_cached(*args, **kwargs):
function _Rbf_cached (line 180) | def _Rbf_cached(*args, **kwargs):
FILE: pysteps/utils/pca.py
function pca_transform (line 25) | def pca_transform(
function pca_backtransform (line 136) | def pca_backtransform(forecast_ens_pc: np.ndarray, pca_params: dict):
FILE: pysteps/utils/reprojection.py
function reproject_grids (line 36) | def reproject_grids(src_array, dst_array, metadata_src, metadata_dst):
function unstructured2regular (line 132) | def unstructured2regular(src_array, metadata_src, metadata_dst):
FILE: pysteps/utils/spectral.py
function corrcoef (line 22) | def corrcoef(X, Y, shape, use_full_fft=False):
function mean (line 79) | def mean(X, shape):
function rapsd (line 100) | def rapsd(
function remove_rain_norain_discontinuity (line 183) | def remove_rain_norain_discontinuity(R):
function std (line 208) | def std(X, shape, use_full_fft=False):
FILE: pysteps/utils/tapering.py
function compute_mask_window_function (line 19) | def compute_mask_window_function(mask, func, **kwargs):
function compute_window_function (line 52) | def compute_window_function(m, n, func, **kwargs):
function _compute_mask_distances (line 97) | def _compute_mask_distances(mask):
function _hann (line 109) | def _hann(R):
function _tukey (line 119) | def _tukey(R, alpha):
function _tukey_masked (line 134) | def _tukey_masked(R, r_max, mask):
FILE: pysteps/utils/transformation.py
function boxcox_transform (line 27) | def boxcox_transform(
function dB_transform (line 150) | def dB_transform(R, metadata=None, threshold=None, zerovalue=None, inver...
function NQ_transform (line 237) | def NQ_transform(R, metadata=None, inverse=False, **kwargs):
function sqrt_transform (line 329) | def sqrt_transform(R, metadata=None, inverse=False, **kwargs):
FILE: pysteps/verification/detcatscores.py
function det_cat_fct (line 23) | def det_cat_fct(pred, obs, thr, scores="", axis=None):
function det_cat_fct_init (line 100) | def det_cat_fct_init(thr, axis=None):
function det_cat_fct_accum (line 145) | def det_cat_fct_accum(contab, pred, obs):
function det_cat_fct_merge (line 219) | def det_cat_fct_merge(contab_1, contab_2):
function det_cat_fct_compute (line 266) | def det_cat_fct_compute(contab, scores=""):
FILE: pysteps/verification/detcontscores.py
function det_cont_fct (line 23) | def det_cont_fct(pred, obs, scores="", axis=None, conditioning=None, thr...
function det_cont_fct_init (line 208) | def det_cont_fct_init(axis=None, conditioning=None, thr=0.0):
function det_cont_fct_accum (line 265) | def det_cont_fct_accum(err, pred, obs):
function det_cont_fct_merge (line 393) | def det_cont_fct_merge(err_1, err_2):
function det_cont_fct_compute (line 477) | def det_cont_fct_compute(err, scores=""):
function _parallel_mean (line 601) | def _parallel_mean(avg_a, count_a, avg_b, count_b):
function _parallel_var (line 609) | def _parallel_var(avg_a, count_a, var_a, avg_b, count_b, var_b):
function _parallel_cov (line 626) | def _parallel_cov(cov_a, avg_xa, avg_ya, count_a, cov_b, avg_xb, avg_yb,...
function _uniquelist (line 645) | def _uniquelist(mylist):
function _scatter (line 650) | def _scatter(pred, obs, axis=None):
function _spearmanr (line 703) | def _spearmanr(pred, obs, axis=None):
FILE: pysteps/verification/ensscores.py
function ensemble_skill (line 23) | def ensemble_skill(X_f, X_o, metric, **kwargs):
function ensemble_spread (line 75) | def ensemble_spread(X_f, metric, **kwargs):
function rankhist (line 127) | def rankhist(X_f, X_o, X_min=None, normalize=True):
function rankhist_init (line 157) | def rankhist_init(num_ens_members, X_min=None):
function rankhist_accum (line 186) | def rankhist_accum(rankhist, X_f, X_o):
function rankhist_compute (line 253) | def rankhist_compute(rankhist, normalize=True):
FILE: pysteps/verification/interface.py
function get_method (line 15) | def get_method(name, type="deterministic"):
FILE: pysteps/verification/lifetime.py
function lifetime (line 24) | def lifetime(X_s, X_t, rule="1/e"):
function lifetime_init (line 63) | def lifetime_init(rule="1/e"):
function lifetime_accum (line 98) | def lifetime_accum(lifetime, X_s, X_t):
function lifetime_compute (line 143) | def lifetime_compute(lifetime):
FILE: pysteps/verification/plots.py
function plot_intensityscale (line 24) | def plot_intensityscale(intscale, fig=None, vminmax=None, kmperpixel=Non...
function plot_rankhist (line 88) | def plot_rankhist(rankhist, ax=None):
function plot_reldiag (line 119) | def plot_reldiag(reldiag, ax=None):
function plot_ROC (line 179) | def plot_ROC(ROC, ax=None, opt_prob_thr=False):
FILE: pysteps/verification/probscores.py
function CRPS (line 28) | def CRPS(X_f, X_o):
function CRPS_init (line 58) | def CRPS_init():
function CRPS_accum (line 70) | def CRPS_accum(CRPS, X_f, X_o):
function CRPS_compute (line 134) | def CRPS_compute(CRPS):
function reldiag (line 151) | def reldiag(P_f, X_o, X_min, n_bins=10, min_count=10):
function reldiag_init (line 184) | def reldiag_init(X_min, n_bins=10, min_count=10):
function reldiag_accum (line 221) | def reldiag_accum(reldiag, P_f, X_o):
function reldiag_compute (line 267) | def reldiag_compute(reldiag):
function ROC_curve (line 288) | def ROC_curve(P_f, X_o, X_min, n_prob_thrs=10, compute_area=False):
function ROC_curve_init (line 323) | def ROC_curve_init(X_min, n_prob_thrs=10):
function ROC_curve_accum (line 352) | def ROC_curve_accum(ROC, P_f, X_o):
function ROC_curve_compute (line 382) | def ROC_curve_compute(ROC, compute_area=False):
FILE: pysteps/verification/salscores.py
function sal (line 51) | def sal(
function sal_structure (line 115) | def sal_structure(
function sal_amplitude (line 166) | def sal_amplitude(prediction, observation):
function sal_location (line 198) | def sal_location(
function _sal_l1_param (line 245) | def _sal_l1_param(prediction, observation):
function _sal_l2_param (line 274) | def _sal_l2_param(prediction, observation, thr_factor, thr_quantile, tst...
function _sal_detect_objects (line 311) | def _sal_detect_objects(precip, thr_factor, thr_quantile, tstorm_kwargs):
function _sal_scaled_volume (line 370) | def _sal_scaled_volume(precip_objects):
function _sal_weighted_distance (line 416) | def _sal_weighted_distance(precip, thr_factor, thr_quantile, tstorm_kwar...
FILE: pysteps/verification/spatialscores.py
function intensity_scale (line 43) | def intensity_scale(X_f, X_o, name, thrs, scales=None, wavelet="Haar"):
function intensity_scale_init (line 97) | def intensity_scale_init(name, thrs, scales=None, wavelet="Haar"):
function intensity_scale_accum (line 182) | def intensity_scale_accum(intscale, X_f, X_o):
function intensity_scale_merge (line 213) | def intensity_scale_merge(intscale_1, intscale_2):
function intensity_scale_compute (line 261) | def intensity_scale_compute(intscale):
function binary_mse (line 298) | def binary_mse(X_f, X_o, thr, wavelet="haar", return_scales=True):
function binary_mse_init (line 339) | def binary_mse_init(thr, wavelet="haar"):
function binary_mse_accum (line 363) | def binary_mse_accum(bmse, X_f, X_o):
function binary_mse_merge (line 421) | def binary_mse_merge(bmse_1, bmse_2):
function binary_mse_compute (line 475) | def binary_mse_compute(bmse, return_scales=True):
function fss (line 516) | def fss(X_f, X_o, thr, scale):
function fss_init (line 549) | def fss_init(thr, scale):
function fss_accum (line 572) | def fss_accum(fss, X_f, X_o):
function fss_merge (line 613) | def fss_merge(fss_1, fss_2):
function fss_compute (line 657) | def fss_compute(fss):
function _wavelet_decomp (line 680) | def _wavelet_decomp(X, w):
FILE: pysteps/visualization/animations.py
function animate (line 24) | def animate(
FILE: pysteps/visualization/basemaps.py
function plot_geography (line 53) | def plot_geography(
function plot_map_cartopy (line 144) | def plot_map_cartopy(
FILE: pysteps/visualization/motionfields.py
function motion_plot (line 27) | def motion_plot(
function quiver (line 145) | def quiver(
function streamplot (line 193) | def streamplot(
FILE: pysteps/visualization/precipfields.py
function plot_precip_field (line 33) | def plot_precip_field(
function _plot_field (line 216) | def _plot_field(precip, ax, extent, cmap, norm, origin=None, x_grid=None...
function get_colormap (line 242) | def get_colormap(ptype, units="mm/h", colorscale="pysteps"):
function _get_colorlist (line 306) | def _get_colorlist(units="mm/h", colorscale="pysteps"):
function _dynamic_formatting_floats (line 491) | def _dynamic_formatting_floats(float_array, colorscale="pysteps"):
function _validate_colormap_config (line 521) | def _validate_colormap_config(colormap_config, ptype):
FILE: pysteps/visualization/spectral.py
function plot_spectrum1d (line 18) | def plot_spectrum1d(
FILE: pysteps/visualization/thunderstorms.py
function plot_track (line 27) | def plot_track(track_list, geodata=None, ref_shape=None):
function plot_cart_contour (line 62) | def plot_cart_contour(contours, geodata=None, ref_shape=None):
function _pix2coord_factory (line 99) | def _pix2coord_factory(geodata, ref_shape):
FILE: pysteps/visualization/utils.py
function parse_proj4_string (line 79) | def parse_proj4_string(proj4str):
function proj4_to_cartopy (line 109) | def proj4_to_cartopy(proj4str):
function reproject_geodata (line 188) | def reproject_geodata(geodata, t_proj4str, return_grid=None):
function get_geogrid (line 282) | def get_geogrid(nlat, nlon, geodata=None):
function get_basemap_axis (line 376) | def get_basemap_axis(extent, geodata=None, ax=None, map_kwargs=None):
Condensed preview — 264 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (2,050K chars).
[
{
"path": ".github/workflows/check_black.yml",
"chars": 1075,
"preview": "# This workflow will test the code base using the LATEST version of black\n\n# IMPORTANT: Black is under development. Henc"
},
{
"path": ".github/workflows/python-publish.yml",
"chars": 815,
"preview": "# This workflows will upload a Python Package using Twine when a release is created\n# For more information see: https://"
},
{
"path": ".github/workflows/test_pysteps.yml",
"chars": 3769,
"preview": "name: Test pysteps\n\non:\n # Triggers the workflow on push or pull request events to the master branch\n push:\n branch"
},
{
"path": ".gitignore",
"chars": 1079,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n*.c\n\n# Distribution / pac"
},
{
"path": ".pre-commit-config.yaml",
"chars": 121,
"preview": "repos:\n- repo: https://github.com/psf/black\n rev: 26.1.0\n hooks:\n - id: black\n language_version: python3"
},
{
"path": ".readthedocs.yml",
"chars": 431,
"preview": "# Read the Docs configuration file\n# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details\n\nversion:"
},
{
"path": "CITATION.bib",
"chars": 1122,
"preview": "@Article{gmd-12-4185-2019,\n AUTHOR = {Pulkkinen, S. and Nerini, D. and P\\'erez Hortal, A. A. and Velasco-Forero, C. and"
},
{
"path": "CONTRIBUTING.rst",
"chars": 19548,
"preview": "Contributing to pysteps\n=======================\n\nWelcome! Pysteps is a community-driven initiative for developing and\nma"
},
{
"path": "LICENSE",
"chars": 1518,
"preview": "BSD 3-Clause License\n\nCopyright (c) 2019, PySteps developers\nAll rights reserved.\n\nRedistribution and use in source and "
},
{
"path": "MANIFEST.in",
"chars": 254,
"preview": "include LICENSE *.txt *.rst\ninclude pysteps/pystepsrc\ninclude pysteps/pystepsrc_schema.json\ninclude pysteps/io/mch_lut_8"
},
{
"path": "PKG-INFO",
"chars": 544,
"preview": "Metadata-Version: 1.2\nName: pysteps\nVersion: 1.20.0\nSummary: Python framework for short-term ensemble prediction systems"
},
{
"path": "README.rst",
"chars": 6568,
"preview": "pysteps - Python framework for short-term ensemble prediction systems\n=================================================="
},
{
"path": "ci/ci_test_env.yml",
"chars": 513,
"preview": "# pysteps development environment\nname: test_environment\nchannels:\n - conda-forge\n - defaults\ndependencies:\n - python"
},
{
"path": "ci/fetch_pysteps_data.py",
"chars": 705,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nScript used to install the pysteps data in a test environment and set a pystepsrc\nconfigurat"
},
{
"path": "ci/test_plugin_support.py",
"chars": 550,
"preview": "# -*- coding: utf-8 -*-\n\n\"\"\"\nScript to test the plugin support.\n\nThis script assumes that a package created with the def"
},
{
"path": "doc/.gitignore",
"chars": 31,
"preview": "_build/\ngenerated\nauto_examples"
},
{
"path": "doc/Makefile",
"chars": 610,
"preview": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS =\nSPHI"
},
{
"path": "doc/_static/pysteps.css",
"chars": 529,
"preview": "\n.section h1 {\n border-bottom: 2px solid #0099ff;\n display: inline-block;\n}\n\n.section h2 {\n border-bottom: 2px "
},
{
"path": "doc/_templates/layout.html",
"chars": 86,
"preview": "{% extends \"!layout.html\" %}\n{% set css_files = css_files + [\"_static/pysteps.css\"] %}"
},
{
"path": "doc/make.bat",
"chars": 811,
"preview": "@ECHO OFF\r\n\r\npushd %~dp0\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sp"
},
{
"path": "doc/rebuild_docs.sh",
"chars": 136,
"preview": "# Build documentation from scratch.\n\nrm -r source/generated &> /dev/null\nrm -r source/auto_examples &> /dev/null\n\nmake c"
},
{
"path": "doc/requirements.txt",
"chars": 260,
"preview": "# Additional requirements related to the documentation build only\nsphinx\nsphinxcontrib.bibtex\nsphinx-book-theme\nsphinx_g"
},
{
"path": "doc/source/conf.py",
"chars": 8445,
"preview": "# -*- coding: utf-8 -*-\n\n# All configuration values have a default; values that are commented out\n# serve to show the de"
},
{
"path": "doc/source/developer_guide/build_the_docs.rst",
"chars": 1908,
"preview": ".. _build_the_docs:\n\n=================\nBuilding the docs\n=================\n\nThe pysteps documentations is build using\n`S"
},
{
"path": "doc/source/developer_guide/contributors_guidelines.rst",
"chars": 67,
"preview": ".. _contributor_guidelines:\n\n.. include:: ../../../CONTRIBUTING.rst"
},
{
"path": "doc/source/developer_guide/importer_plugins.rst",
"chars": 2066,
"preview": ".. _importer-plugins:\n\n===========================\nCreate your importer plugin\n===========================\n\nSince versio"
},
{
"path": "doc/source/developer_guide/pypi.rst",
"chars": 5634,
"preview": ".. _pypi_relase:\n\n=============================\nPackaging the pysteps project\n=============================\n\nThe `Python"
},
{
"path": "doc/source/developer_guide/test_pysteps.rst",
"chars": 1484,
"preview": ".. _testing_pysteps:\n\n===============\nTesting pysteps\n===============\n\nThe pysteps distribution includes a small test su"
},
{
"path": "doc/source/developer_guide/update_conda_forge.rst",
"chars": 7927,
"preview": ".. _update_conda_feedstock:\n\n==========================================\nUpdating the conda-forge pysteps-feedstock\n====="
},
{
"path": "doc/source/index.rst",
"chars": 2267,
"preview": "pysteps -- The nowcasting initiative\n====================================\n\nPysteps is a community-driven initiative for "
},
{
"path": "doc/source/pysteps_reference/blending.rst",
"chars": 537,
"preview": "================\npysteps.blending\n================\n\nImplementation of blending methods for blending (ensemble) nowcasts "
},
{
"path": "doc/source/pysteps_reference/cascade.rst",
"chars": 302,
"preview": "===============\npysteps.cascade\n===============\n\nMethods for constructing bandpass filters and decomposing 2d precipitat"
},
{
"path": "doc/source/pysteps_reference/datasets.rst",
"chars": 32,
"preview": ".. automodule:: pysteps.datasets"
},
{
"path": "doc/source/pysteps_reference/decorators.rst",
"chars": 35,
"preview": ".. automodule:: pysteps.decorators\n"
},
{
"path": "doc/source/pysteps_reference/downscaling.rst",
"chars": 221,
"preview": "===================\npysteps.downscaling\n===================\n\nImplementation of deterministic and ensemble downscaling me"
},
{
"path": "doc/source/pysteps_reference/extrapolation.rst",
"chars": 217,
"preview": "=====================\npysteps.extrapolation\n=====================\n\nExtrapolation module functions and interfaces.\n\n.. au"
},
{
"path": "doc/source/pysteps_reference/feature.rst",
"chars": 258,
"preview": "===============\npysteps.feature\n===============\n\nImplementations of feature detection methods.\n\n\n.. automodule:: pysteps"
},
{
"path": "doc/source/pysteps_reference/index.rst",
"chars": 699,
"preview": ".. _pysteps-reference:\n\nAPI Reference\n=============\n\n:Release: |version|\n:Date: |today|\n\nThis page gives an comprehensiv"
},
{
"path": "doc/source/pysteps_reference/io.rst",
"chars": 364,
"preview": "==========\npysteps.io\n==========\n\nMethods for browsing data archives, reading 2d precipitation fields and writing \nforec"
},
{
"path": "doc/source/pysteps_reference/motion.rst",
"chars": 326,
"preview": "==============\npysteps.motion\n==============\n\nImplementations of optical flow methods.\n\n\n.. automodule:: pysteps.motion."
},
{
"path": "doc/source/pysteps_reference/noise.rst",
"chars": 267,
"preview": "=============\npysteps.noise\n=============\n\nImplementation of deterministic and ensemble nowcasting methods.\n\n\n.. automod"
},
{
"path": "doc/source/pysteps_reference/nowcasts.rst",
"chars": 499,
"preview": "================\npysteps.nowcasts\n================\n\nImplementation of deterministic and ensemble nowcasting methods.\n\n\n."
},
{
"path": "doc/source/pysteps_reference/postprocessing.rst",
"chars": 220,
"preview": "======================\npysteps.postprocessing\n======================\n\nMethods for post-processing of forecasts.\n\n\n.. aut"
},
{
"path": "doc/source/pysteps_reference/pysteps.rst",
"chars": 125,
"preview": "=======\npysteps\n=======\n\nPystep top module utils\n\n.. autosummary::\n :toctree: ../generated/\n\n pysteps.load_config_"
},
{
"path": "doc/source/pysteps_reference/timeseries.rst",
"chars": 203,
"preview": "==================\npysteps.timeseries\n==================\n\nMethods and models for time series analysis.\n\n\n.. automodule::"
},
{
"path": "doc/source/pysteps_reference/tracking.rst",
"chars": 228,
"preview": "================\npysteps.tracking\n================\n\nImplementations of feature tracking methods.\n\n\n.. automodule:: pyste"
},
{
"path": "doc/source/pysteps_reference/utils.rst",
"chars": 607,
"preview": "=============\npysteps.utils\n=============\n\nImplementation of miscellaneous utility functions.\n\n\n.. automodule:: pysteps."
},
{
"path": "doc/source/pysteps_reference/verification.rst",
"chars": 576,
"preview": "====================\npysteps.verification\n====================\n\nMethods for verification of deterministic, probabilistic"
},
{
"path": "doc/source/pysteps_reference/visualization.rst",
"chars": 463,
"preview": "=====================\npysteps.visualization\n=====================\n\nMethods for plotting precipitation and motion fields."
},
{
"path": "doc/source/references.bib",
"chars": 13315,
"preview": "\n@TECHREPORT{BPS2004,\n AUTHOR = \"N. E. Bowler and C. E. Pierce and A. W. Seed\",\n TITLE = \"{STEPS}: A probabilistic pre"
},
{
"path": "doc/source/user_guide/example_data.rst",
"chars": 2957,
"preview": ".. _example_data:\n\nInstalling the example data\n===========================\n\nThe examples scripts in the user guide, as w"
},
{
"path": "doc/source/user_guide/install_pysteps.rst",
"chars": 8083,
"preview": ".. _install_pysteps:\n\nInstalling pysteps\n==================\n\nDependencies\n------------\n\nThe pysteps package needs the fo"
},
{
"path": "doc/source/user_guide/machine_learning_pysteps.rst",
"chars": 6638,
"preview": ".. _machine_learning_pysteps:\n\nBenchmarking machine learning models with pysteps\n======================================="
},
{
"path": "doc/source/user_guide/pystepsrc_example.rst",
"chars": 3142,
"preview": ".. _pystepsrc_example:\n\nExample of pystepsrc file\n=========================\n\nBelow you can find the default pystepsrc fi"
},
{
"path": "doc/source/user_guide/set_pystepsrc.rst",
"chars": 5198,
"preview": ".. _pystepsrc:\n\nThe pysteps configuration file (pystepsrc)\n==========================================\n\n.. _JSON: https:/"
},
{
"path": "doc/source/zz_bibliography.rst",
"chars": 88,
"preview": ".. _bibliography:\n\n============\nBibliography\n============\n\n\n.. bibliography::\n :all:\n"
},
{
"path": "environment.yml",
"chars": 185,
"preview": "name: pysteps\nchannels:\n- conda-forge\n- defaults\ndependencies:\n - python>=3.10\n - jsmin\n - jsonschema\n - matplotlib\n"
},
{
"path": "environment_dev.yml",
"chars": 455,
"preview": "# pysteps development environment\nname: pysteps_dev\nchannels:\n - conda-forge\n - defaults\ndependencies:\n - python>=3.1"
},
{
"path": "examples/LK_buffer_mask.py",
"chars": 8990,
"preview": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nHandling of no-data in Lucas-Kanade\r\n===================================\r\n\r\nAreas of missi"
},
{
"path": "examples/README.txt",
"chars": 265,
"preview": ".. _example_gallery:\n\nExample gallery\n===============\n\nBelow is a collection of example scripts and tutorials to illustr"
},
{
"path": "examples/advection_correction.py",
"chars": 5629,
"preview": "\"\"\"\r\nAdvection correction\r\n====================\r\n\r\nThis tutorial shows how to use the optical flow routines of pysteps t"
},
{
"path": "examples/anvil_nowcast.py",
"chars": 7258,
"preview": "# coding: utf-8\n\n\"\"\"\nANVIL nowcast\n=============\n\nThis example demonstrates how to use ANVIL and the advantages compared"
},
{
"path": "examples/data_transformations.py",
"chars": 7504,
"preview": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nData transformations\r\n====================\r\n\r\nThe statistics of intermittent precipitation"
},
{
"path": "examples/ens_kalman_filter_blended_forecast.py",
"chars": 12627,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nEnsemble-based Blending\n=======================\n\nThis tutorial demonstrates how to construct"
},
{
"path": "examples/linda_nowcasts.py",
"chars": 5388,
"preview": "#!/bin/env python\n\"\"\"\nLINDA nowcasts\n==============\n\nThis example shows how to compute and plot a deterministic and ense"
},
{
"path": "examples/my_first_nowcast.ipynb",
"chars": 29472,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {\n \"colab_type\": \"text\",\n \"id\": \"L_dntwSQBnbK\"\n },\n"
},
{
"path": "examples/optical_flow_methods_convergence.py",
"chars": 12612,
"preview": "# coding: utf-8\n\n\"\"\"\nOptical flow methods convergence\n================================\n\nIn this example we test the conv"
},
{
"path": "examples/plot_cascade_decomposition.py",
"chars": 4575,
"preview": "#!/bin/env python\n\"\"\"\nCascade decomposition\n=====================\n\nThis example script shows how to compute and plot the"
},
{
"path": "examples/plot_custom_precipitation_range.py",
"chars": 4926,
"preview": "#!/bin/env python\n\"\"\"\nPlot precipitation using custom colormap\n=============\n\nThis tutorial shows how to plot data using"
},
{
"path": "examples/plot_ensemble_verification.py",
"chars": 5822,
"preview": "#!/bin/env python\n\"\"\"\nEnsemble verification\n=====================\n\nIn this tutorial we perform a verification of a proba"
},
{
"path": "examples/plot_extrapolation_nowcast.py",
"chars": 4377,
"preview": "#!/bin/env python\n\"\"\"\nExtrapolation nowcast\n=====================\n\nThis tutorial shows how to compute and plot an extrap"
},
{
"path": "examples/plot_linear_blending.py",
"chars": 9688,
"preview": "# -*- coding: utf-8 -*-\n\n\"\"\"\nLinear blending\n===============\n\nThis tutorial shows how to construct a simple linear blend"
},
{
"path": "examples/plot_noise_generators.py",
"chars": 5670,
"preview": "#!/bin/env python\r\n\"\"\"\r\nGeneration of stochastic noise\r\n==============================\r\n\r\nThis example script shows how "
},
{
"path": "examples/plot_optical_flow.py",
"chars": 5965,
"preview": "\"\"\"\r\nOptical flow\r\n============\r\n\r\nThis tutorial offers a short overview of the optical flow routines available in\r\npyst"
},
{
"path": "examples/plot_steps_nowcast.py",
"chars": 6258,
"preview": "#!/bin/env python\n\"\"\"\nSTEPS nowcast\n=============\n\nThis tutorial shows how to compute and plot an ensemble nowcast using"
},
{
"path": "examples/probability_forecast.py",
"chars": 4653,
"preview": "#!/bin/env python\n\"\"\"\nProbability forecasts\n=====================\n\nThis example script shows how to forecast the probabi"
},
{
"path": "examples/rainfarm_downscale.py",
"chars": 6311,
"preview": "#!/bin/env python\r\n\"\"\"\r\nPrecipitation downscaling with RainFARM\r\n=======================================\r\n\r\nThis example"
},
{
"path": "examples/steps_blended_forecast.py",
"chars": 16063,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nBlended forecast\n====================\n\nThis tutorial shows how to construct a blended foreca"
},
{
"path": "examples/thunderstorm_detection_and_tracking.py",
"chars": 6762,
"preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThunderstorm Detection and Tracking - T-DaTing\n======================"
},
{
"path": "pyproject.toml",
"chars": 623,
"preview": "[build-system]\nrequires = [\n \"wheel\",\n \"setuptools>=40.8.0\",\n \"Cython>=0.29.2\",\n \"numpy>=1.13\"\n]\n# setuptool"
},
{
"path": "pysteps/__init__.py",
"chars": 6470,
"preview": "import json\nimport os\nimport stat\nimport sys\nimport warnings\n\nfrom jsmin import jsmin\nfrom jsonschema import Draft4Valid"
},
{
"path": "pysteps/blending/__init__.py",
"chars": 199,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"Methods for blending NWP model(s) with nowcasts.\"\"\"\n\nfrom pysteps.blending.interface import g"
},
{
"path": "pysteps/blending/clim.py",
"chars": 7305,
"preview": "\"\"\"\npysteps.blending.clim\n=====================\n\nModule with methods to read, write and compute past and climatological "
},
{
"path": "pysteps/blending/ens_kalman_filter_methods.py",
"chars": 27431,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.blending.ens_kalman_filter_methods\n=============================================\nMet"
},
{
"path": "pysteps/blending/interface.py",
"chars": 3066,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.blending.interface\n==========================\nInterface for the blending module. It "
},
{
"path": "pysteps/blending/linear_blending.py",
"chars": 13906,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.nowcasts.linear_blending\n================================\n\nLinear blending method to"
},
{
"path": "pysteps/blending/pca_ens_kalman_filter.py",
"chars": 75371,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.blending.pca_ens_kalman_filter\n======================================\n\nImplementatio"
},
{
"path": "pysteps/blending/skill_scores.py",
"chars": 10881,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.blending.skill_scores\n==============================\n\nMethods for computing skill sc"
},
{
"path": "pysteps/blending/steps.py",
"chars": 187776,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.blending.steps\n======================\n\nImplementation of the STEPS stochastic blendi"
},
{
"path": "pysteps/blending/utils.py",
"chars": 23274,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.blending.utils\n======================\n\nModule with common utilities used by the blen"
},
{
"path": "pysteps/cascade/__init__.py",
"chars": 180,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nMethods for constructing bandpass filters and decomposing 2d precipitation\nfields into diffe"
},
{
"path": "pysteps/cascade/bandpass_filters.py",
"chars": 7145,
"preview": "\"\"\"\npysteps.cascade.bandpass_filters\n================================\n\nBandpass filters for separating different spatial"
},
{
"path": "pysteps/cascade/decomposition.py",
"chars": 12158,
"preview": "\"\"\"\npysteps.cascade.decomposition\n=============================\n\nMethods for decomposing two-dimensional fields into mul"
},
{
"path": "pysteps/cascade/interface.py",
"chars": 2630,
"preview": "\"\"\"\npysteps.cascade.interface\n=========================\n\nInterface for the cascade module.\n\n.. autosummary::\n :toctre"
},
{
"path": "pysteps/datasets.py",
"chars": 15198,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.datasets\n================\n\nUtilities to download the pysteps data and to create a de"
},
{
"path": "pysteps/decorators.py",
"chars": 10941,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.decorators\n==================\n\nDecorators used to define reusable building blocks th"
},
{
"path": "pysteps/downscaling/__init__.py",
"chars": 151,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"Implementations of deterministic and ensemble downscaling methods.\"\"\"\n\nfrom pysteps.downscali"
},
{
"path": "pysteps/downscaling/interface.py",
"chars": 1685,
"preview": "\"\"\"\npysteps.downscaling.interface\n=============================\n\nInterface for the downscaling module. It returns a call"
},
{
"path": "pysteps/downscaling/rainfarm.py",
"chars": 11183,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.downscaling.rainfarm\n============================\n\nImplementation of the RainFARM st"
},
{
"path": "pysteps/exceptions.py",
"chars": 438,
"preview": "# -*- coding: utf-8 -*-\n\n# Custom pySteps exceptions\n\n\nclass MissingOptionalDependency(Exception):\n \"\"\"Raised when an"
},
{
"path": "pysteps/extrapolation/__init__.py",
"chars": 294,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\n Methods for advection-based extrapolation of precipitation fields.\nCurrently the module "
},
{
"path": "pysteps/extrapolation/interface.py",
"chars": 5185,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.extrapolation.interface\n===============================\n\nThe functions in the extrap"
},
{
"path": "pysteps/extrapolation/semilagrangian.py",
"chars": 9554,
"preview": "\"\"\"\npysteps.extrapolation.semilagrangian\n====================================\n\nImplementation of the semi-Lagrangian met"
},
{
"path": "pysteps/feature/__init__.py",
"chars": 126,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"Implementations of feature detection methods.\"\"\"\n\nfrom pysteps.feature.interface import get_m"
},
{
"path": "pysteps/feature/blob.py",
"chars": 3654,
"preview": "\"\"\"\npysteps.feature.blob\n====================\n\nBlob detection methods.\n\n.. autosummary::\n :toctree: ../generated/\n\n "
},
{
"path": "pysteps/feature/interface.py",
"chars": 2794,
"preview": "\"\"\"\npysteps.feature.interface\n=========================\n\nInterface for the feature detection module. It returns a callab"
},
{
"path": "pysteps/feature/shitomasi.py",
"chars": 5595,
"preview": "\"\"\"\npysteps.feature.shitomasi\n=========================\n\nShi-Tomasi features detection method to detect corners in an im"
},
{
"path": "pysteps/feature/tstorm.py",
"chars": 10466,
"preview": "\"\"\"\npysteps.feature.tstorm\n======================\n\nThunderstorm cell detection module, part of Thunderstorm Detection an"
},
{
"path": "pysteps/io/__init__.py",
"chars": 334,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nMethods for browsing data archives, reading 2d precipitation fields and writing\nforecasts in"
},
{
"path": "pysteps/io/archive.py",
"chars": 3864,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.io.archive\n==================\n\nUtilities for finding archived files that match the g"
},
{
"path": "pysteps/io/exporters.py",
"chars": 35504,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.io.exporters\n====================\n\nMethods for exporting forecasts of 2d precipitati"
},
{
"path": "pysteps/io/importers.py",
"chars": 71512,
"preview": "\"\"\"\npysteps.io.importers\n====================\n\nMethods for importing files containing two-dimensional radar mosaics.\n\nTh"
},
{
"path": "pysteps/io/interface.py",
"chars": 10200,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.io.interface\n====================\n\nInterface for the io module.\n\n.. currentmodule:: "
},
{
"path": "pysteps/io/mch_lut_8bit_Metranet_AZC_V104.txt",
"chars": 7452,
"preview": " DN Red Gre Blue mm\n 0 0 0 0 0.00\n 1 147 163 160 0.04\n 2 145 161 161 0.07\n 3 "
},
{
"path": "pysteps/io/mch_lut_8bit_Metranet_v103.txt",
"chars": 6754,
"preview": "Index R G B mm/h\n0\t255\t255\t255 -10.0\n1\t235\t235\t235 0.0001 \n2\t145\t161\t161 0.10\n3\t143\t"
},
{
"path": "pysteps/io/nowcast_importers.py",
"chars": 9784,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.io.nowcast_importers\n============================\n\nMethods for importing nowcast fil"
},
{
"path": "pysteps/io/readers.py",
"chars": 2483,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.io.readers\n==================\n\nModule with the reader functions.\n\n.. autosummary::\n "
},
{
"path": "pysteps/motion/__init__.py",
"chars": 107,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nImplementations of optical flow methods.\"\"\"\n\nfrom .interface import get_method\n"
},
{
"path": "pysteps/motion/_proesmans.pyx",
"chars": 12040,
"preview": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCython module for the Proesmans optical flow algorithm\n\"\"\"\n\n#from cython.parallel import pa"
},
{
"path": "pysteps/motion/_vet.pyx",
"chars": 23293,
"preview": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCython module for morphing and cost functions implementations used in\nin the Variation Echo"
},
{
"path": "pysteps/motion/constant.py",
"chars": 1584,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.motion.constant\n=======================\n\nImplementation of a constant advection fiel"
},
{
"path": "pysteps/motion/darts.py",
"chars": 6762,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.motion.darts\n====================\n\nImplementation of the DARTS algorithm.\n\n.. autosu"
},
{
"path": "pysteps/motion/farneback.py",
"chars": 9558,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.motion.farneback\n========================\n\nThe Farneback dense optical flow module.\n"
},
{
"path": "pysteps/motion/interface.py",
"chars": 5198,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.motion.interface\n========================\n\nInterface for the motion module. It retur"
},
{
"path": "pysteps/motion/lucaskanade.py",
"chars": 9900,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.motion.lucaskanade\n==========================\n\nThe Lucas-Kanade (LK) local feature t"
},
{
"path": "pysteps/motion/proesmans.py",
"chars": 2799,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.motion.proesmans\n========================\n\nImplementation of the anisotropic diffusi"
},
{
"path": "pysteps/motion/vet.py",
"chars": 21349,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.motion.vet\n==================\n\nVariational Echo Tracking (VET) Module\n\nThis module i"
},
{
"path": "pysteps/noise/__init__.py",
"chars": 199,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nMethods for generating stochastic perturbations of 2d precipitation and\nvelocity fields.\n\"\"\""
},
{
"path": "pysteps/noise/fftgenerators.py",
"chars": 32006,
"preview": "\"\"\"\npysteps.noise.fftgenerators\n===========================\n\nMethods for noise generators based on FFT filtering of whit"
},
{
"path": "pysteps/noise/interface.py",
"chars": 3464,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.noise.interface\n=======================\n\nInterface for the noise module.\n\n.. autosum"
},
{
"path": "pysteps/noise/motion.py",
"chars": 5818,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.noise.motion\n====================\n\nMethods for generating perturbations of two-dimen"
},
{
"path": "pysteps/noise/utils.py",
"chars": 4088,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.noise.utils\n===================\n\nMiscellaneous utility functions related to generati"
},
{
"path": "pysteps/nowcasts/__init__.py",
"chars": 123,
"preview": "\"\"\"Implementations of deterministic and ensemble nowcasting methods.\"\"\"\n\nfrom pysteps.nowcasts.interface import get_meth"
},
{
"path": "pysteps/nowcasts/anvil.py",
"chars": 17646,
"preview": "\"\"\"\npysteps.nowcasts.anvil\n======================\n\nImplementation of the autoregressive nowcasting using VIL (ANVIL) now"
},
{
"path": "pysteps/nowcasts/extrapolation.py",
"chars": 3689,
"preview": "\"\"\"\npysteps.nowcasts.extrapolation\n==============================\n\nImplementation of extrapolation-based nowcasting meth"
},
{
"path": "pysteps/nowcasts/interface.py",
"chars": 4809,
"preview": "r\"\"\"\npysteps.nowcasts.interface\n==========================\n\nInterface for the nowcasts module. It returns a callable fun"
},
{
"path": "pysteps/nowcasts/lagrangian_probability.py",
"chars": 4087,
"preview": "\"\"\"\npysteps.nowcasts.lagrangian_probability\n=======================================\n\nImplementation of the local Lagrang"
},
{
"path": "pysteps/nowcasts/linda.py",
"chars": 49011,
"preview": "\"\"\"\npysteps.nowcasts.linda\n======================\n\nThis module implements the Lagrangian INtegro-Difference equation mod"
},
{
"path": "pysteps/nowcasts/sprog.py",
"chars": 15452,
"preview": "\"\"\"\npysteps.nowcasts.sprog\n======================\n\nImplementation of the S-PROG method described in :cite:`Seed2003`\n\n.."
},
{
"path": "pysteps/nowcasts/sseps.py",
"chars": 39197,
"preview": "\"\"\"\npysteps.nowcasts.sseps\n======================\n\nImplementation of the Short-space ensemble prediction system (SSEPS) "
},
{
"path": "pysteps/nowcasts/steps.py",
"chars": 66671,
"preview": "\"\"\"\npysteps.nowcasts.steps\n======================\n\nImplementation of the STEPS stochastic nowcasting method as described"
},
{
"path": "pysteps/nowcasts/utils.py",
"chars": 22341,
"preview": "\"\"\"\npysteps.nowcasts.utils\n======================\n\nModule with common utilities used by nowcasts methods.\n\n.. autosummar"
},
{
"path": "pysteps/postprocessing/__init__.py",
"chars": 182,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"Methods for post-processing of forecasts.\"\"\"\n\nfrom . import ensemblestats\nfrom .diagnostics i"
},
{
"path": "pysteps/postprocessing/diagnostics.py",
"chars": 518,
"preview": "\"\"\"\npysteps.postprocessing.diagnostics\n====================\n\nMethods for applying diagnostics postprocessing.\n\nThe metho"
},
{
"path": "pysteps/postprocessing/ensemblestats.py",
"chars": 4652,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.postprocessing.ensemblestats\n====================================\n\nMethods for the c"
},
{
"path": "pysteps/postprocessing/interface.py",
"chars": 8283,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.postprocessing.interface\n====================\n\nInterface for the postprocessing modu"
},
{
"path": "pysteps/postprocessing/probmatching.py",
"chars": 12048,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.postprocessing.probmatching\n===================================\n\nMethods for matchin"
},
{
"path": "pysteps/pystepsrc",
"chars": 5275,
"preview": "// pysteps configuration\n{\n // \"silent_import\" : whether to suppress the initial pysteps message\n \"silent_import\":"
},
{
"path": "pysteps/pystepsrc_schema.json",
"chars": 2204,
"preview": "{\n \"title\": \"pystepsrc params\",\n \"description\": \"Pysteps default parameters\",\n \"required\": [\n \"outputs\","
},
{
"path": "pysteps/scripts/__init__.py",
"chars": 120,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nStandalone utility scripts for pysteps (e.g. parameter estimation from the\ngiven data).\n\"\"\"\n"
},
{
"path": "pysteps/scripts/fit_vel_pert_params.py",
"chars": 2898,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"Fit STEPS motion perturbation parameters to the output of run_vel_pert_analysis.py\nand option"
},
{
"path": "pysteps/scripts/run_vel_pert_analysis.py",
"chars": 5604,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"Analyze uncertainty of motion field with increasing lead time. The analyses\nare done by compa"
},
{
"path": "pysteps/tests/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "pysteps/tests/helpers.py",
"chars": 6995,
"preview": "\"\"\"\nTesting helper functions\n=======================\n\nCollection of helper functions for the testing suite.\n\"\"\"\n\nfrom da"
},
{
"path": "pysteps/tests/test_archive.py",
"chars": 741,
"preview": "# -*- coding: utf-8 -*-\n\nimport pytest\nfrom datetime import datetime\n\nfrom pysteps.io.archive import _generate_path\n\ntes"
},
{
"path": "pysteps/tests/test_blending_clim.py",
"chars": 4396,
"preview": "# -*- coding: utf-8 -*-\n\n\nfrom datetime import datetime, timedelta\nfrom os.path import join, exists\nimport pickle\nimport"
},
{
"path": "pysteps/tests/test_blending_linear_blending.py",
"chars": 8166,
"preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\nfrom pysteps.blending.linear_blending import forecast, _get_ra"
},
{
"path": "pysteps/tests/test_blending_pca_ens_kalman_filter.py",
"chars": 11076,
"preview": "# -*- coding: utf-8 -*-\n\nimport datetime\n\nimport numpy as np\nimport pytest\n\nfrom pysteps import blending, motion, utils\n"
},
{
"path": "pysteps/tests/test_blending_skill_scores.py",
"chars": 9138,
"preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom pyst"
},
{
"path": "pysteps/tests/test_blending_steps.py",
"chars": 19779,
"preview": "# -*- coding: utf-8 -*-\n\nimport datetime\n\nimport numpy as np\nimport pytest\n\nimport pysteps\nfrom pysteps import blending,"
},
{
"path": "pysteps/tests/test_blending_utils.py",
"chars": 13767,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal"
},
{
"path": "pysteps/tests/test_cascade.py",
"chars": 2172,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal"
},
{
"path": "pysteps/tests/test_datasets.py",
"chars": 2535,
"preview": "# -*- coding: utf-8 -*-\nimport os\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nfrom _pytest.outcomes import Sk"
},
{
"path": "pysteps/tests/test_decorators.py",
"chars": 697,
"preview": "# -*- coding: utf-8 -*-\nimport time\n\nfrom pysteps.decorators import memoize\n\n\ndef test_memoize():\n @memoize(maxsize=1"
},
{
"path": "pysteps/tests/test_downscaling_rainfarm.py",
"chars": 3705,
"preview": "# -*- coding: utf-8 -*-\n\nimport pytest\nimport numpy as np\nfrom pysteps import downscaling\nfrom pysteps.tests.helpers imp"
},
{
"path": "pysteps/tests/test_ensscores.py",
"chars": 1813,
"preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom pyst"
},
{
"path": "pysteps/tests/test_exporters.py",
"chars": 6759,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\nimport tempfile\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\nfrom "
},
{
"path": "pysteps/tests/test_extrapolation_semilagrangian.py",
"chars": 1906,
"preview": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom pyste"
},
{
"path": "pysteps/tests/test_feature.py",
"chars": 1038,
"preview": "import pytest\nimport numpy as np\nfrom pysteps import feature\nfrom pysteps.tests.helpers import get_precipitation_fields\n"
},
{
"path": "pysteps/tests/test_feature_tstorm.py",
"chars": 3596,
"preview": "import numpy as np\nimport pytest\n\nfrom pysteps.feature.tstorm import detection\nfrom pysteps.utils import to_reflectivity"
},
{
"path": "pysteps/tests/test_importer_decorator.py",
"chars": 1149,
"preview": "# -*- coding: utf-8 -*-\nfrom functools import partial\n\nimport numpy as np\nimport pytest\n\nfrom pysteps.tests.helpers impo"
},
{
"path": "pysteps/tests/test_interfaces.py",
"chars": 13894,
"preview": "# -*- coding: utf-8 -*-\n\nimport numpy\nimport pytest\n\nimport pysteps\n\n\ndef _generic_interface_test(method_getter, valid_n"
},
{
"path": "pysteps/tests/test_io_archive.py",
"chars": 814,
"preview": "from datetime import datetime\n\nimport pytest\n\nimport pysteps\n\n\ndef test_find_by_date_mch():\n pytest.importorskip(\"PIL"
},
{
"path": "pysteps/tests/test_io_bom_rf3.py",
"chars": 3013,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport pytest\n\nimport pysteps\nfrom pysteps.tests.helpers import smart_assert\n\nnetCDF"
},
{
"path": "pysteps/tests/test_io_dwd_hdf5.py",
"chars": 1756,
"preview": "# -*- coding: utf-8 -*-\n\nimport pytest\n\nimport pysteps\nfrom pysteps.tests.helpers import smart_assert, get_precipitation"
},
{
"path": "pysteps/tests/test_io_fmi_geotiff.py",
"chars": 1296,
"preview": "import os\n\nimport pytest\n\nimport pysteps\nfrom pysteps.tests.helpers import smart_assert\n\npytest.importorskip(\"pyproj\")\np"
},
{
"path": "pysteps/tests/test_io_fmi_pgm.py",
"chars": 2883,
"preview": "import os\n\nimport pytest\n\nimport pysteps\nfrom pysteps.tests.helpers import smart_assert\n\npytest.importorskip(\"pyproj\")\n\n"
},
{
"path": "pysteps/tests/test_io_knmi_hdf5.py",
"chars": 1543,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport pytest\n\nimport pysteps\nfrom pysteps.tests.helpers import smart_assert\n\npytest"
},
{
"path": "pysteps/tests/test_io_mch_gif.py",
"chars": 2191,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport pytest\n\nimport pysteps\nfrom pysteps.tests.helpers import smart_assert\n\npytest"
},
{
"path": "pysteps/tests/test_io_mrms_grib.py",
"chars": 2566,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal"
},
{
"path": "pysteps/tests/test_io_nowcast_importers.py",
"chars": 1488,
"preview": "import numpy as np\nimport pytest\n\nfrom pysteps import io\nfrom pysteps.tests.helpers import get_precipitation_fields\n\npre"
},
{
"path": "pysteps/tests/test_io_opera_hdf5.py",
"chars": 6362,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport pytest\n\nimport pysteps\nfrom pysteps.tests.helpers import smart_assert\n\npytest"
},
{
"path": "pysteps/tests/test_io_readers.py",
"chars": 1035,
"preview": "from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nimport pysteps\n\n\ndef test_read_timeseries_mch():\n py"
},
{
"path": "pysteps/tests/test_io_saf_crri.py",
"chars": 2847,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport pytest\n\nimport pysteps\nfrom pysteps.tests.helpers import smart_assert\n\npytest"
},
{
"path": "pysteps/tests/test_motion.py",
"chars": 13304,
"preview": "# coding: utf-8\n\n\"\"\"\nTest the convergence of the optical flow methods available in\npySTEPS using idealized motion fields"
},
{
"path": "pysteps/tests/test_motion_farneback.py",
"chars": 3540,
"preview": "import pytest\nimport numpy as np\n\nfrom pysteps.motion import farneback\nfrom pysteps.exceptions import MissingOptionalDep"
},
{
"path": "pysteps/tests/test_motion_lk.py",
"chars": 2738,
"preview": "# coding: utf-8\n\n\"\"\" \"\"\"\n\nimport pytest\nimport numpy as np\n\nfrom pysteps import motion, verification\nfrom pysteps.tests."
},
{
"path": "pysteps/tests/test_noise_fftgenerators.py",
"chars": 1795,
"preview": "import numpy as np\n\nfrom pysteps.noise import fftgenerators\nfrom pysteps.tests.helpers import get_precipitation_fields\n\n"
},
{
"path": "pysteps/tests/test_noise_motion.py",
"chars": 1941,
"preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom pyst"
},
{
"path": "pysteps/tests/test_nowcasts_anvil.py",
"chars": 2907,
"preview": "import numpy as np\nimport pytest\n\nfrom pysteps import motion, nowcasts, verification\nfrom pysteps.tests.helpers import g"
},
{
"path": "pysteps/tests/test_nowcasts_lagrangian_probability.py",
"chars": 2672,
"preview": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pytest\n\nfrom pysteps.nowcasts.lagrangian_probability import forecast\nf"
},
{
"path": "pysteps/tests/test_nowcasts_linda.py",
"chars": 7679,
"preview": "from datetime import timedelta\nimport os\nimport numpy as np\nimport pytest\n\nfrom pysteps import io, motion, nowcasts, ver"
},
{
"path": "pysteps/tests/test_nowcasts_sprog.py",
"chars": 2801,
"preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\n\nfrom pysteps import motion, nowcasts, verification\nfrom pyste"
},
{
"path": "pysteps/tests/test_nowcasts_sseps.py",
"chars": 3043,
"preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\n\nfrom pysteps import motion, nowcasts, verification\nfrom pyste"
},
{
"path": "pysteps/tests/test_nowcasts_steps.py",
"chars": 5897,
"preview": "import os\nfrom datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pysteps import io, motion, nowcasts, ve"
},
{
"path": "pysteps/tests/test_nowcasts_utils.py",
"chars": 1500,
"preview": "import numpy as np\nimport pytest\n\nfrom pysteps import motion\nfrom pysteps.nowcasts import utils as nowcast_utils\nfrom py"
},
{
"path": "pysteps/tests/test_paramsrc.py",
"chars": 2656,
"preview": "# -*- coding: utf-8 -*-\nimport os\n\nfrom tempfile import NamedTemporaryFile\n\nimport pysteps\nfrom pysteps import load_conf"
},
{
"path": "pysteps/tests/test_plt_animate.py",
"chars": 2466,
"preview": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport numpy as np\nimport pytest\nfrom unittest.mock import patch\n\nfrom pysteps.tests"
},
{
"path": "pysteps/tests/test_plt_cartopy.py",
"chars": 1493,
"preview": "# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom pysteps.visualization import plot_precip_field\nfrom pysteps.utils import to"
},
{
"path": "pysteps/tests/test_plt_motionfields.py",
"chars": 3367,
"preview": "# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytest\n\nfrom pysteps import motion\nfr"
}
]
// ... 64 more files omitted from this preview (download the full .txt file for their complete content)
About this extraction
This page contains the full source code of the pySTEPS/pysteps GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 264 files (1.9 MB, approximately 493.2k tokens) and a symbol index with 737 extracted functions, classes, methods, constants, and types. You can use this file with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input, either by copying the full output to your clipboard or by downloading it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.